#include <sys/types.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/user.h>
#include <sys/callb.h>
#include <sys/cpuvar.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_acl.h>
#include <sys/fs/ufs_bio.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <vm/pvn.h>
/* Scheduling priority used for the UFS service threads created in this file. */
extern pri_t minclsyspri;
/* K&R-style declaration; parameters are unchecked at call sites. */
extern int hash2ints();
extern struct kmem_cache *inode_cache;
extern int ufs_idle_waiters;
extern struct instats ins;
/* Forward declaration: unlink every entry of an extended-attribute directory. */
static void ufs_attr_purge(struct inode *);
/*
 * ufs_thread_init
 *	Prepare a UFS work queue for use: clear the structure, initialize
 *	its condition variable and mutex, and record the water marks (the
 *	high-water mark is twice the caller-supplied low-water mark).
 *	No service thread is attached yet.
 */
void
ufs_thread_init(struct ufs_q *uq, int lowat)
{
	bzero((caddr_t)uq, sizeof (*uq));
	cv_init(&uq->uq_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&uq->uq_mutex, NULL, MUTEX_DEFAULT, NULL);
	uq->uq_threadp = NULL;
	uq->uq_lowat = lowat;
	uq->uq_hiwat = lowat * 2;
}
/*
 * ufs_thread_start
 *	Attach a service thread to the queue if one is not already
 *	running.  The new kernel thread executes func(vfsp) at
 *	minclsyspri as a child of p0; the queue flags are reset for the
 *	fresh thread.  Serialized by uq_mutex.
 */
void
ufs_thread_start(struct ufs_q *uq, void (*func)(), struct vfs *vfsp)
{
	mutex_enter(&uq->uq_mutex);
	if (uq->uq_threadp == NULL) {
		/* Holding uq_mutex, so the new thread cannot observe */
		/* the queue until both stores below are complete. */
		uq->uq_flags = 0;
		uq->uq_threadp = thread_create(NULL, 0, func, vfsp, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&uq->uq_mutex);
}
/*
 * ufs_thread_exit
 *	Tell the queue's service thread to terminate and wait for it to
 *	finish.  Any pending suspend state is cleared first so the thread
 *	cannot park itself instead of exiting.  The thread id is captured
 *	under uq_mutex (the thread clears uq_threadp as it exits).
 */
void
ufs_thread_exit(struct ufs_q *uq)
{
	kt_did_t tid = 0;

	mutex_enter(&uq->uq_mutex);
	uq->uq_flags &= ~(UQ_SUSPEND | UQ_SUSPENDED);
	if (uq->uq_threadp != NULL) {
		tid = uq->uq_threadp->t_did;
		uq->uq_flags |= (UQ_EXIT | UQ_WAIT);
		cv_broadcast(&uq->uq_cv);
	}
	mutex_exit(&uq->uq_mutex);

	/* Wait for the thread to actually die before returning. */
	if (tid != 0)
		thread_join(tid);
}
/*
 * ufs_thread_suspend
 *	Ask the queue's service thread to suspend itself and do not return
 *	until it has acknowledged by setting UQ_SUSPENDED (or the thread
 *	goes away).  Entire handshake runs under uq_mutex.
 */
void
ufs_thread_suspend(struct ufs_q *uq)
{
mutex_enter(&uq->uq_mutex);
if (uq->uq_threadp != NULL) {
/*
 * A previous suspend request is still in flight; wait for it to
 * complete before posting ours.  UQ_WAIT asks the thread to wake us.
 */
while ((uq->uq_flags & UQ_SUSPEND) &&
(uq->uq_threadp != NULL)) {
uq->uq_flags |= UQ_WAIT;
cv_wait(&uq->uq_cv, &uq->uq_mutex);
}
/* Post our suspend request and ask to be notified. */
uq->uq_flags |= (UQ_SUSPEND | UQ_WAIT);
/*
 * Kick the thread so it notices UQ_SUSPEND (it may be asleep in
 * ufs_thread_run()).
 */
if ((uq->uq_flags & UQ_SUSPENDED) == 0 &&
(uq->uq_threadp != NULL)) {
cv_broadcast(&uq->uq_cv);
}
/* Wait for the thread to acknowledge that it has parked itself. */
while (((uq->uq_flags & UQ_SUSPENDED) == 0) &&
(uq->uq_threadp != NULL)) {
cv_wait(&uq->uq_cv, &uq->uq_mutex);
}
}
mutex_exit(&uq->uq_mutex);
}
/*
 * ufs_thread_continue
 *	Undo ufs_thread_suspend(): clear both the request (UQ_SUSPEND)
 *	and the acknowledgement (UQ_SUSPENDED) bits, then wake the
 *	service thread so it resumes processing the queue.
 */
void
ufs_thread_continue(struct ufs_q *uq)
{
	mutex_enter(&uq->uq_mutex);
	uq->uq_flags &= ~(UQ_SUSPEND | UQ_SUSPENDED);
	cv_broadcast(&uq->uq_cv);
	mutex_exit(&uq->uq_mutex);
}
/*
 * ufs_thread_run
 *	Common dispatch loop for a queue's service thread.  Must be called
 *	(and returns) with uq->uq_mutex held.  Returns the number of queue
 *	entries the caller should process; if UQ_EXIT is posted and no work
 *	remains this function never returns (it calls thread_exit()).
 */
int
ufs_thread_run(struct ufs_q *uq, callb_cpr_t *cprinfop)
{
again:
ASSERT(uq->uq_ne >= 0);
if (uq->uq_flags & UQ_SUSPEND) {
/* Acknowledge a suspend request; ufs_thread_suspend() is waiting. */
uq->uq_flags |= UQ_SUSPENDED;
} else if (uq->uq_flags & UQ_EXIT) {
/*
 * Exit requested: drain remaining work first, then detach from the
 * queue, wake ufs_thread_exit()/waiters, and terminate this thread.
 */
if (uq->uq_ne)
return (uq->uq_ne);
uq->uq_threadp = NULL;
if (uq->uq_flags & UQ_WAIT) {
cv_broadcast(&uq->uq_cv);
}
uq->uq_flags &= ~(UQ_EXIT | UQ_WAIT);
CALLB_CPR_EXIT(cprinfop);
thread_exit();
} else if (uq->uq_ne >= uq->uq_lowat) {
/* Enough work queued: drain down to half the low-water mark. */
return (uq->uq_ne - (uq->uq_lowat >> 1));
}
/* Nothing to do: release anyone in ufs_sync_with_thread(), then sleep. */
if (uq->uq_flags & UQ_WAIT) {
uq->uq_flags &= ~UQ_WAIT;
cv_broadcast(&uq->uq_cv);
}
/* Sleep CPR-safely (suspend/resume may checkpoint us here). */
CALLB_CPR_SAFE_BEGIN(cprinfop);
cv_wait(&uq->uq_cv, &uq->uq_mutex);
CALLB_CPR_SAFE_END(cprinfop, &uq->uq_mutex);
goto again;
}
/*
 * ufs_delete
 *	Actually free one deleted inode: truncate its data, tear down any
 *	extended-attribute directory and ACL/shadow state, release quota,
 *	and mark the inode free on disk.  The caller's vnode hold on ip is
 *	released on every path.  dolockfs selects whether the operation is
 *	bracketed by ufs_lockfs_begin()/ufs_lockfs_end().
 */
void
ufs_delete(struct ufsvfs *ufsvfsp, struct inode *ip, int dolockfs)
{
ushort_t mode;
struct vnode *vp = ITOV(ip);
struct ulockfs *ulp;
int trans_size;
/* Only regular files take i_rwlock here (directories and devices do not). */
int dorwlock = ((ip->i_mode & IFMT) == IFREG);
int issync;
int err;
struct inode *dp;
struct ufs_q *delq = &ufsvfsp->vfs_delete;
struct ufs_delq_info *delq_info = &ufsvfsp->vfs_delete_info;
/*
 * Deletes are administratively disabled (lockfs NOIDEL): back the inode
 * out of the unreclaimed statistics and just drop the hold.
 */
if (ULOCKFS_IS_NOIDEL(ITOUL(ip))) {
mutex_enter(&delq->uq_mutex);
delq_info->delq_unreclaimed_blocks -= ip->i_blocks;
delq_info->delq_unreclaimed_files--;
mutex_exit(&delq->uq_mutex);
VN_RELE(vp);
return;
}
/*
 * Someone re-referenced the vnode, or the inode was already freed
 * (i_mode == 0); nothing for us to do beyond fixing the statistics.
 */
if ((vp->v_count > 1) || (ip->i_mode == 0)) {
mutex_enter(&delq->uq_mutex);
delq_info->delq_unreclaimed_blocks -= ip->i_blocks;
delq_info->delq_unreclaimed_files--;
mutex_exit(&delq->uq_mutex);
VN_RELE(vp);
return;
}
/* Enter the filesystem under lockfs, or mark this thread non-blocking. */
if (dolockfs) {
if (ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_DELETE_MASK))
return;
} else {
if (curthread->t_flag & T_DONTBLOCK) {
/* Already inside a transaction; do not begin/end another. */
ulp = NULL;
} else {
ulp = &ufsvfsp->vfs_ulockfs;
curthread->t_flag |= T_DONTBLOCK;
}
}
if (dorwlock)
rw_enter(&ip->i_rwlock, RW_WRITER);
/*
 * The inode has an extended-attribute directory (i_oeftflag holds its
 * inode number): drop the attr dir's link counts and DNLC entries inside
 * a TOP_REMOVE transaction before freeing the parent.
 */
if (ip->i_oeftflag != 0) {
TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
trans_size = (int)TOP_REMOVE_SIZE(ip));
rw_enter(&ip->i_contents, RW_WRITER);
err = ufs_iget(ip->i_vfs, ip->i_oeftflag,
&dp, CRED());
if (err == 0) {
rw_enter(&dp->i_rwlock, RW_WRITER);
rw_enter(&dp->i_contents, RW_WRITER);
dp->i_flag |= IUPD|ICHG;
dp->i_seq++;
TRANS_INODE(dp->i_ufsvfs, dp);
/* "." and the parent's attr link both go away. */
dp->i_nlink -= 2;
ufs_setreclaim(dp);
dnlc_remove(ITOV(dp), ".");
dnlc_remove(ITOV(dp), "..");
ITIMES_NOLOCK(dp);
if (!TRANS_ISTRANS(ufsvfsp)) {
/* No log: push the attr dir's inode synchronously. */
ufs_iupdat(dp, I_SYNC);
}
rw_exit(&dp->i_contents);
rw_exit(&dp->i_rwlock);
VN_RELE(ITOV(dp));
}
ip->i_oeftflag = 0;
rw_exit(&ip->i_contents);
TRANS_END_CSYNC(ufsvfsp, err, issync,
TOP_REMOVE, trans_size);
dnlc_remove(ITOV(ip), XATTR_DIR_NAME);
}
/* An attribute directory itself: unlink all of its remaining entries. */
if ((ip->i_mode & IFMT) == IFATTRDIR) {
ufs_attr_purge(ip);
}
/* Release every data block; I_ACCT keeps the delete-queue accounting. */
(void) TRANS_ITRUNC(ip, (u_offset_t)0, I_FREE | I_ACCT, CRED());
if (ulp) {
trans_size = TOP_IFREE_SIZE(ip);
TRANS_BEGIN_ASYNC(ufsvfsp, TOP_IFREE, trans_size);
}
/* Free the inode itself: clear its identity under i_contents. */
rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
rw_enter(&ip->i_contents, RW_WRITER);
TRANS_INODE(ufsvfsp, ip);
mode = ip->i_mode;
ip->i_mode = 0;
ip->i_rdev = 0;
ip->i_ordev = 0;
ip->i_flag |= IMOD;
/* Detach and release any shadow (ACL) inode. */
if (ip->i_ufs_acl) {
(void) ufs_si_free(ip->i_ufs_acl, vp->v_vfsp, CRED());
ip->i_ufs_acl = NULL;
ip->i_shadow = 0;
}
/* The vnode's identity is gone; let VFS caches know. */
mutex_enter(&vp->v_lock);
vn_recycle(vp);
mutex_exit(&vp->v_lock);
ufs_ifree(ip, ip->i_number, mode);
/* Give the owner's inode quota back, then drop our dquot reference. */
(void) chkiq((struct ufsvfs *)vp->v_vfsp->vfs_data,
-1, ip, (uid_t)ip->i_uid, 0, CRED(),
(char **)NULL, (size_t *)NULL);
dqrele(ip->i_dquot);
ip->i_dquot = NULL;
ip->i_flag &= ~(IDEL | IDIRECTIO);
ip->i_cflags = 0;
if (!TRANS_ISTRANS(ufsvfsp)) {
/* No log: force the freed inode to disk now. */
ufs_iupdat(ip, I_SYNC);
} else {
/*
 * Logging: account the reclaimed file here.  NOTE(review): the
 * block count is presumably folded in via I_ACCT during the
 * truncation above — confirm against TRANS_ITRUNC.
 */
mutex_enter(&delq->uq_mutex);
delq_info->delq_unreclaimed_files--;
mutex_exit(&delq->uq_mutex);
}
rw_exit(&ip->i_contents);
rw_exit(&ufsvfsp->vfs_dqrwlock);
if (dorwlock)
rw_exit(&ip->i_rwlock);
VN_RELE(vp);
/* Close the transaction and, if we took it, drop the lockfs hold. */
if (ulp) {
TRANS_END_ASYNC(ufsvfsp, TOP_IFREE, trans_size);
if (dolockfs)
ufs_lockfs_end(ulp);
else
curthread->t_flag &= ~T_DONTBLOCK;
}
}
void
ufs_delete_init(struct ufsvfs *ufsvfsp, int lowat)
{
struct ufs_delq_info *delq_info = &ufsvfsp->vfs_delete_info;
ufs_thread_init(&ufsvfsp->vfs_delete, lowat);
(void) memset((void *)delq_info, 0, sizeof (*delq_info));
}
/*
 * ufs_thread_delete
 *	Per-filesystem delete-queue service thread.  Loops forever, taking
 *	one inode at a time off uq_ihead and handing it to ufs_delete().
 *	Termination happens inside ufs_thread_run() (thread_exit() on
 *	UQ_EXIT), so the loop itself never ends.
 */
void
ufs_thread_delete(struct vfs *vfsp)
{
struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
struct ufs_q *uq = &ufsvfsp->vfs_delete;
struct inode *ip;
long ne;
callb_cpr_t cprinfo;
CALLB_CPR_INIT(&cprinfo, &uq->uq_mutex, callb_generic_cpr,
"ufsdelete");
mutex_enter(&uq->uq_mutex);
again:
/* Process one entry per pass so suspend/exit requests are honored. */
ne = ufs_thread_run(uq, &cprinfo) ? 1 : 0;
if (ne && (ip = uq->uq_ihead)) {
/* Unlink ip from the circular free list and make it self-linked. */
if ((uq->uq_ihead = ip->i_freef) == ip)
uq->uq_ihead = NULL;
ip->i_freef->i_freeb = ip->i_freeb;
ip->i_freeb->i_freef = ip->i_freef;
ip->i_freef = ip;
ip->i_freeb = ip;
uq->uq_ne--;
/* Drop the queue lock while doing the (blocking) delete. */
mutex_exit(&uq->uq_mutex);
ufs_delete(ufsvfsp, ip, 1);
mutex_enter(&uq->uq_mutex);
}
goto again;
}
/*
 * ufs_delete_drain
 *	Synchronously process entries from a filesystem's delete queue.
 *	ne == 0: drain the number of entries present at entry time;
 *	ne > 0: drain at most ne entries; ne == -1: drain until the queue
 *	is empty.  dolockfs is passed through to ufs_delete().
 */
void
ufs_delete_drain(struct vfs *vfsp, int ne, int dolockfs)
{
struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
struct ufs_q *uq;
struct inode *ip;
int drain_cnt = 0;
int done;
/* Nothing mounted here (can happen during forced unmount). */
if (ufsvfsp == NULL)
return;
uq = &ufsvfsp->vfs_delete;
mutex_enter(&uq->uq_mutex);
if (ne == 0)
drain_cnt = uq->uq_ne;
else if (ne > 0)
drain_cnt = ne;
done = 0;
while (!done && (ip = uq->uq_ihead)) {
/* With ne == -1 the count is ignored and we run until empty. */
if (ne != -1)
drain_cnt--;
if (ne != -1 && drain_cnt == 0)
done = 1;
/* Unlink ip from the circular free list and make it self-linked. */
if ((uq->uq_ihead = ip->i_freef) == ip)
uq->uq_ihead = NULL;
ip->i_freef->i_freeb = ip->i_freeb;
ip->i_freeb->i_freef = ip->i_freef;
ip->i_freef = ip;
ip->i_freeb = ip;
uq->uq_ne--;
/* Drop the queue lock around the blocking delete. */
mutex_exit(&uq->uq_mutex);
ufs_delete(ufsvfsp, ip, dolockfs);
mutex_enter(&uq->uq_mutex);
}
mutex_exit(&uq->uq_mutex);
}
/*
 * ufs_sync_with_thread
 *	Wake the queue's service thread and wait until it clears UQ_WAIT,
 *	i.e. until it has completed a pass over its work.  Returns
 *	immediately once the thread is gone (uq_threadp == NULL).
 */
void
ufs_sync_with_thread(struct ufs_q *uq)
{
mutex_enter(&uq->uq_mutex);
/* Post the request only if one is not already pending. */
if ((uq->uq_flags & UQ_WAIT) == 0) {
uq->uq_flags |= UQ_WAIT;
cv_broadcast(&uq->uq_cv);
}
/* The thread clears UQ_WAIT (and broadcasts) in ufs_thread_run(). */
while ((uq->uq_threadp != NULL) && (uq->uq_flags & UQ_WAIT)) {
cv_wait(&uq->uq_cv, &uq->uq_mutex);
}
mutex_exit(&uq->uq_mutex);
}
/*
 * ufs_delete_drain_wait
 *	If the delete queue still has unreclaimed files, drain it
 *	synchronously, wait for the delete thread to go idle, and then
 *	commit the log so the freed space is durable on disk.
 *	dolockfs is passed through to ufs_delete_drain().
 */
void
ufs_delete_drain_wait(struct ufsvfs *ufsvfsp, int dolockfs)
{
	int error;
	struct ufs_q *delq = &ufsvfsp->vfs_delete;
	struct ufs_delq_info *delq_info = &ufsvfsp->vfs_delete_info;

	mutex_enter(&delq->uq_mutex);
	if (delq_info->delq_unreclaimed_files > 0) {
		mutex_exit(&delq->uq_mutex);
		ufs_delete_drain(ufsvfsp->vfs_vfs, 0, dolockfs);
		ufs_sync_with_thread(delq);
	} else {
		ASSERT(delq_info->delq_unreclaimed_files == 0);
		mutex_exit(&delq->uq_mutex);
		return;
	}

	/*
	 * Commit an empty synchronous transaction to flush the log, making
	 * the deletions performed above visible on disk.
	 */
	curthread->t_flag |= T_DONTBLOCK;
	TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_UPDATE, TOP_COMMIT_SIZE, error);
	if (!error) {
		TRANS_END_SYNC(ufsvfsp, error, TOP_COMMIT_UPDATE,
		    TOP_COMMIT_SIZE);
	}
	curthread->t_flag &= ~T_DONTBLOCK;
}
/*
 * ufs_delete_adjust_stats
 *	Fold the not-yet-reclaimed delete-queue totals into a statvfs
 *	result, so blocks and inodes still awaiting deletion are reported
 *	as free space to callers of statvfs(2).
 */
void
ufs_delete_adjust_stats(struct ufsvfs *ufsvfsp, struct statvfs64 *sp)
{
	struct ufs_q *delq = &ufsvfsp->vfs_delete;
	struct ufs_delq_info *dqi = &ufsvfsp->vfs_delete_info;

	mutex_enter(&delq->uq_mutex);
	sp->f_bfree += dbtofsb(ufsvfsp->vfs_fs,
	    dqi->delq_unreclaimed_blocks);
	sp->f_ffree += dqi->delq_unreclaimed_files;
	mutex_exit(&delq->uq_mutex);
}
/*
 * ufs_idle_free
 *	Attempt to destroy an idle inode: push or invalidate its pages,
 *	remove it from the inode hash, release quota and shadow state, and
 *	return it to the inode cache.  If the vnode turns out to still be
 *	referenced, modified, or holding pages, the inode is left alone and
 *	only our hold is dropped.
 */
static void
ufs_idle_free(struct inode *ip)
{
int pages;
int hno;
kmutex_t *ihm;
struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
struct vnode *vp = ITOV(ip);
int vn_has_data, vn_modified;
/* Remember whether pages were attached, for the statistics below. */
pages = (ip->i_mode && vn_has_cached_data(vp) && vp->v_type != VCHR);
/*
 * For a non-stale inode, first push dirty state asynchronously, then
 * invalidate the pages (forced if the log is in an error state).
 */
if ((ip->i_flag & ISTALE) == 0) {
(void) TRANS_SYNCIP(ip, B_ASYNC, I_ASYNC, TOP_SYNCIP_FREE);
(void) TRANS_SYNCIP(ip,
(TRANS_ISERROR(ufsvfsp)) ? B_INVAL | B_FORCE : B_INVAL,
I_ASYNC, TOP_SYNCIP_FREE);
}
ASSERT(ip->i_number != 0);
/* Take the hash-chain lock, then the vnode lock, and drop our hold. */
hno = INOHASH(ip->i_number);
ihm = &ih_lock[hno];
mutex_enter(ihm);
mutex_enter(&vp->v_lock);
VERIFY3U(vp->v_count, >=, 2);
VN_RELE_LOCKED(vp);
/* Re-evaluate state now that we hold v_lock. */
vn_has_data = (vp->v_type != VCHR && vn_has_cached_data(vp));
vn_modified = (ip->i_flag & (IMOD|IMODACC|IACC|ICHG|IUPD|IATTCHG));
/*
 * Someone else holds a reference, or a non-stale inode still has data
 * or pending modifications: it must stay alive.
 */
if (vp->v_count != 1 ||
((vn_has_data || vn_modified) &&
((ip->i_flag & ISTALE) == 0))) {
mutex_exit(&vp->v_lock);
mutex_exit(ihm);
VN_RELE(vp);
} else {
/* Ours to destroy: take it off the hash chain. */
mutex_exit(&vp->v_lock);
remque(ip);
mutex_exit(ihm);
/* Release the quota reference (stale inodes keep theirs dropped elsewhere). */
if ((ip->i_flag & ISTALE) == 0 && ip->i_dquot) {
TRANS_DQRELE(ufsvfsp, ip->i_dquot);
ip->i_dquot = NULL;
}
/* A stale inode may still carry pages; throw them away. */
if ((ip->i_flag & ISTALE) &&
vn_has_data) {
(void) pvn_vplist_dirty(vp, (u_offset_t)0,
ufs_putapage, B_INVAL | B_TRUNC,
(struct cred *)NULL);
}
ufs_si_del(ip);
if (pages) {
CPU_STATS_ADDQ(CPU, sys, ufsipage, 1);
} else {
CPU_STATS_ADDQ(CPU, sys, ufsinopage, 1);
}
ASSERT((vp->v_type == VCHR) || !vn_has_cached_data(vp));
ASSERT(vp->v_count == 1);
ufs_free_inode(ip);
}
}
/*
 * Global idle-inode machinery.  Idle inodes sit on one of two sets of
 * hash-bucketed circular queues; presumably "junk" entries are those not
 * worth caching while "useful" ones may be reused — TODO confirm against
 * where IJUNKIQ is set.  The queues are sized in ufs_thread_idle().
 */
iqhead_t *ufs_junk_iq;
iqhead_t *ufs_useful_iq;
int ufs_njunk_iq = 0;	/* number of inodes on the junk queues */
int ufs_nuseful_iq = 0;	/* number of inodes on the useful queues */
int ufs_niqhash;	/* bucket count (power of two) */
int ufs_iqhashmask;	/* ufs_niqhash - 1, for masking hash values */
struct ufs_q ufs_idle_q;	/* bookkeeping for the global idle thread */
/*
 * ufs_thread_idle
 *	Global idle-queue service thread.  On first entry it sizes and
 *	initializes the junk/useful hash queues (each bucket is an empty
 *	self-linked circular list), then loops forever: sleep until the
 *	idle queue reaches its low-water mark, and free half of it.
 */
void
ufs_thread_idle(void)
{
callb_cpr_t cprinfo;
int i;
int ne;
/* Round the bucket count up to a power of two for mask-based hashing. */
ufs_niqhash = (ufs_idle_q.uq_lowat >> 1) / IQHASHQLEN;
ufs_niqhash = 1 << highbit(ufs_niqhash);
ufs_iqhashmask = ufs_niqhash - 1;
ufs_junk_iq = kmem_alloc(ufs_niqhash * sizeof (*ufs_junk_iq),
KM_SLEEP);
ufs_useful_iq = kmem_alloc(ufs_niqhash * sizeof (*ufs_useful_iq),
KM_SLEEP);
for (i = 0; i < ufs_niqhash; i++) {
ufs_junk_iq[i].i_freef = (inode_t *)&ufs_junk_iq[i];
ufs_junk_iq[i].i_freeb = (inode_t *)&ufs_junk_iq[i];
ufs_useful_iq[i].i_freef = (inode_t *)&ufs_useful_iq[i];
ufs_useful_iq[i].i_freeb = (inode_t *)&ufs_useful_iq[i];
}
CALLB_CPR_INIT(&cprinfo, &ufs_idle_q.uq_mutex, callb_generic_cpr,
"ufsidle");
again:
/* Sleep (CPR-safe) until there is enough idle work queued. */
mutex_enter(&ufs_idle_q.uq_mutex);
if (ufs_idle_q.uq_ne < ufs_idle_q.uq_lowat) {
CALLB_CPR_SAFE_BEGIN(&cprinfo);
cv_wait(&ufs_idle_q.uq_cv, &ufs_idle_q.uq_mutex);
CALLB_CPR_SAFE_END(&cprinfo, &ufs_idle_q.uq_mutex);
}
mutex_exit(&ufs_idle_q.uq_mutex);
/*
 * NOTE(review): uq_ne is read here without uq_mutex; presumably a
 * slightly stale count is acceptable for sizing this pass — confirm.
 */
ne = ufs_idle_q.uq_ne >> 1;
ins.in_tidles.value.ul += ne;
ufs_idle_some(ne);
goto again;
}
/*
 * ufs_inode_cache_reclaim
 *	Kmem-cache reclaim callback for the inode cache: when the idle
 *	queue holds at least half its low-water mark of inodes, wake the
 *	idle thread so it can free some of them.  The unlocked read of
 *	uq_ne is a cheap heuristic; the idle thread rechecks under lock.
 */
void
ufs_inode_cache_reclaim(void *cdrarg)
{
	if (ufs_idle_q.uq_ne >= (ufs_idle_q.uq_lowat >> 1)) {
		mutex_enter(&ufs_idle_q.uq_mutex);
		cv_broadcast(&ufs_idle_q.uq_cv);
		mutex_exit(&ufs_idle_q.uq_mutex);
	}
}
/*
 * ufs_idle_some
 *	Try to free up to ne idle inodes.  Junk-queue entries are preferred
 *	over useful-queue entries; within each set, rotor indices spread
 *	the scan across hash buckets between calls.  Each candidate is
 *	re-validated under i_contents via ufs_rmidle() before being freed,
 *	since it may have been re-activated after we dropped the queue lock.
 */
void
ufs_idle_some(int ne)
{
int i;
struct inode *ip;
struct vnode *vp;
static int junk_rotor = 0;
static int useful_rotor = 0;
for (i = 0; i < ne; ++i) {
mutex_enter(&ufs_idle_q.uq_mutex);
if (ufs_njunk_iq) {
/* Advance the rotor past empty (self-linked) buckets. */
while (ufs_junk_iq[junk_rotor].i_freef ==
(inode_t *)&ufs_junk_iq[junk_rotor]) {
junk_rotor = IQNEXT(junk_rotor);
}
ip = ufs_junk_iq[junk_rotor].i_freef;
ASSERT(ip->i_flag & IJUNKIQ);
} else if (ufs_nuseful_iq) {
/* No junk inodes left; fall back to the useful queues. */
while (ufs_useful_iq[useful_rotor].i_freef ==
(inode_t *)&ufs_useful_iq[useful_rotor]) {
useful_rotor = IQNEXT(useful_rotor);
}
ip = ufs_useful_iq[useful_rotor].i_freef;
ASSERT(!(ip->i_flag & IJUNKIQ));
} else {
/* Both queue sets are empty; nothing more to free. */
mutex_exit(&ufs_idle_q.uq_mutex);
return;
}
/* Hold the vnode so the inode cannot vanish once we drop the lock. */
vp = ITOV(ip);
VN_HOLD(vp);
mutex_exit(&ufs_idle_q.uq_mutex);
/*
 * ufs_rmidle() removes ip from the idle queue if it is still
 * there; if it was re-activated meanwhile, just drop our hold.
 */
rw_enter(&ip->i_contents, RW_WRITER);
if (ufs_rmidle(ip)) {
rw_exit(&ip->i_contents);
ufs_idle_free(ip);
} else {
rw_exit(&ip->i_contents);
VN_RELE(vp);
}
}
}
/*
 * ufs_idle_drain
 *	Remove all idle inodes belonging to vfsp (or every filesystem when
 *	vfsp == NULL) from the junk and useful queues, then free them.
 *	Candidates are first collected onto a private list (reusing i_freef
 *	as the link, which is safe because ufs_rmidle() has detached them),
 *	and freed only after the queue lock is released.
 */
void
ufs_idle_drain(struct vfs *vfsp)
{
struct inode *ip, *nip;
struct inode *ianchor = NULL;
int i;
mutex_enter(&ufs_idle_q.uq_mutex);
if (ufs_njunk_iq) {
/* Walk every junk bucket's circular list. */
for (i = 0; i < ufs_niqhash; i++) {
for (ip = ufs_junk_iq[i].i_freef;
ip != (inode_t *)&ufs_junk_iq[i];
ip = ip->i_freef) {
if (ip->i_vfs == vfsp || vfsp == NULL) {
VN_HOLD(ITOV(ip));
mutex_exit(&ufs_idle_q.uq_mutex);
rw_enter(&ip->i_contents, RW_WRITER);
/* Chain it for later freeing if still idle. */
if (ufs_rmidle(ip)) {
rw_exit(&ip->i_contents);
ip->i_freef = ianchor;
ianchor = ip;
} else {
rw_exit(&ip->i_contents);
VN_RELE(ITOV(ip));
}
/*
 * The queue may have changed while unlocked:
 * restart this bucket from its head.
 */
ip = (inode_t *)&ufs_junk_iq[i];
mutex_enter(&ufs_idle_q.uq_mutex);
}
}
}
}
if (ufs_nuseful_iq) {
/* Same scan over the useful buckets. */
for (i = 0; i < ufs_niqhash; i++) {
for (ip = ufs_useful_iq[i].i_freef;
ip != (inode_t *)&ufs_useful_iq[i];
ip = ip->i_freef) {
if (ip->i_vfs == vfsp || vfsp == NULL) {
VN_HOLD(ITOV(ip));
mutex_exit(&ufs_idle_q.uq_mutex);
rw_enter(&ip->i_contents, RW_WRITER);
if (ufs_rmidle(ip)) {
rw_exit(&ip->i_contents);
ip->i_freef = ianchor;
ianchor = ip;
} else {
rw_exit(&ip->i_contents);
VN_RELE(ITOV(ip));
}
ip = (inode_t *)&ufs_useful_iq[i];
mutex_enter(&ufs_idle_q.uq_mutex);
}
}
}
}
mutex_exit(&ufs_idle_q.uq_mutex);
/* Free the collected inodes, restoring each one's self-link first. */
for (ip = ianchor; ip; ip = nip) {
nip = ip->i_freef;
ip->i_freef = ip;
ufs_idle_free(ip);
}
}
/*
 * ufs_thread_reclaim
 *	One-shot service thread that scans every inode in the filesystem
 *	looking for orphans (on-disk link count <= 0 but mode != 0) left
 *	over from a crash, and revives them via ufs_iget()/VN_RELE() so the
 *	normal delete path reclaims their space.  Honors the queue's
 *	suspend/exit protocol between inodes; clears FS_RECLAIMING in the
 *	superblock on successful completion.
 */
void
ufs_thread_reclaim(struct vfs *vfsp)
{
struct ufsvfs *ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
struct ufs_q *uq = &ufsvfsp->vfs_reclaim;
struct fs *fs = ufsvfsp->vfs_fs;
struct buf *bp = 0;
int err = 0;
daddr_t bno;
ino_t ino;
struct dinode *dp;
struct inode *ip;
callb_cpr_t cprinfo;
CALLB_CPR_INIT(&cprinfo, &uq->uq_mutex, callb_generic_cpr,
"ufsreclaim");
/* Nothing to do unless a reclaim is pending and the fs is writable. */
if ((fs->fs_reclaim & FS_RECLAIMING) == 0)
err++;
if (fs->fs_ronly)
err++;
for (ino = 0; ino < (fs->fs_ncg * fs->fs_ipg) && !err; ++ino) {
mutex_enter(&uq->uq_mutex);
again:
if (uq->uq_flags & UQ_EXIT) {
err++;
mutex_exit(&uq->uq_mutex);
break;
} else if (uq->uq_flags & UQ_SUSPEND) {
/* Park: release the buffer, wake waiters, sleep CPR-safe. */
uq->uq_flags |= UQ_SUSPENDED;
if (bp) {
brelse(bp);
bp = 0;
}
if (uq->uq_flags & UQ_WAIT) {
uq->uq_flags &= ~UQ_WAIT;
cv_broadcast(&uq->uq_cv);
}
CALLB_CPR_SAFE_BEGIN(&cprinfo);
cv_wait(&uq->uq_cv, &uq->uq_mutex);
CALLB_CPR_SAFE_END(&cprinfo, &uq->uq_mutex);
goto again;
}
mutex_exit(&uq->uq_mutex);
/* Read the disk block holding this inode, reusing bp when possible. */
bno = fsbtodb(fs, itod(fs, ino));
if ((bp == 0) || (bp->b_blkno != bno)) {
if (bp)
brelse(bp);
bp = UFS_BREAD(ufsvfsp,
ufsvfsp->vfs_dev, bno, fs->fs_bsize);
/* B_AGE: this buffer is unlikely to be needed again soon. */
bp->b_flags |= B_AGE;
}
if (bp->b_flags & B_ERROR) {
err++;
continue;
}
dp = (struct dinode *)bp->b_un.b_addr + itoo(fs, ino);
/*
 * Orphan: allocated (mode != 0) but unlinked.  Bringing it in
 * core and releasing it lets the delete machinery reclaim it.
 */
if ((dp->di_nlink <= 0) && (dp->di_mode != 0)) {
brelse(bp);
bp = 0;
rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
if (ufs_iget(vfsp, ino, &ip, CRED()))
err++;
else
VN_RELE(ITOV(ip));
rw_exit(&ufsvfsp->vfs_dqrwlock);
}
}
if (bp)
brelse(bp);
/* Full scan completed: record that reclamation is finished. */
if (!err) {
mutex_enter(&ufsvfsp->vfs_lock);
fs->fs_reclaim &= ~FS_RECLAIMING;
mutex_exit(&ufsvfsp->vfs_lock);
TRANS_SBWRITE(ufsvfsp, TOP_SBWRITE_RECLAIM);
}
/* Detach from the queue, wake anyone waiting on us, and exit. */
mutex_enter(&uq->uq_mutex);
uq->uq_threadp = NULL;
uq->uq_flags &= ~UQ_WAIT;
cv_broadcast(&uq->uq_cv);
CALLB_CPR_EXIT(&cprinfo);
thread_exit();
}
/* Queue used to signal the global hardlock thread below. */
struct ufs_q ufs_hlock;
/*
 * ufs_thread_hlock
 *	Global hardlock service thread: each time it is kicked via
 *	ufs_hlock, it calls ufs_trans_hlock(), retrying once per second
 *	(CPR-safe timed wait) until the hardlock attempt succeeds.
 */
void
ufs_thread_hlock(void *ignore)
{
int retry;
callb_cpr_t cprinfo;
CALLB_CPR_INIT(&cprinfo, &ufs_hlock.uq_mutex, callb_generic_cpr,
"ufshlock");
for (;;) {
/* Sleep until someone posts work, then consume the whole count. */
mutex_enter(&ufs_hlock.uq_mutex);
(void) ufs_thread_run(&ufs_hlock, &cprinfo);
ufs_hlock.uq_ne = 0;
mutex_exit(&ufs_hlock.uq_mutex);
do {
retry = ufs_trans_hlock();
if (retry) {
/* Back off for ~1 second before retrying. */
mutex_enter(&ufs_hlock.uq_mutex);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
(void) cv_reltimedwait(&ufs_hlock.uq_cv,
&ufs_hlock.uq_mutex, hz, TR_CLOCK_TICK);
CALLB_CPR_SAFE_END(&cprinfo,
&ufs_hlock.uq_mutex);
mutex_exit(&ufs_hlock.uq_mutex);
}
} while (retry);
}
}
/*
 * ufs_attr_purge
 *	Walk an extended-attribute directory and unlink every entry except
 *	"." and "..": each entry's inode has its link count dropped by one
 *	(inside a TOP_REMOVE transaction) and is marked for reclamation.
 *	The directory's own blocks are freed later by the caller's truncate.
 */
static void
ufs_attr_purge(struct inode *dp)
{
int err;
int error;
off_t dirsize;
off_t offset;
int entryoffsetinblk;
struct inode *tp;
struct fbuf *fbp;
struct direct *ep;
int trans_size;
int issync;
struct ufsvfs *ufsvfsp = dp->i_ufsvfs;
rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
fbp = NULL;
dirsize = roundup(dp->i_size, DIRBLKSIZ);
offset = 0;
entryoffsetinblk = 0;
/* Invalidate any cached directory lookups for this directory. */
dnlc_dir_purge(&dp->i_danchor);
while (offset < dirsize) {
/* Crossing a block boundary: map in the next directory block. */
if (blkoff(dp->i_fs, offset) == 0) {
if (fbp != NULL) {
fbrelse(fbp, S_OTHER);
}
err = blkatoff(dp, offset, (char **)0, &fbp);
if (err) {
goto out;
}
entryoffsetinblk = 0;
}
ep = (struct direct *)(fbp->fb_addr + entryoffsetinblk);
/* Skip unused slots and the "." / ".." entries. */
if (ep->d_ino == 0 || (ep->d_name[0] == '.' &&
ep->d_name[1] == '\0') ||
(ep->d_name[0] == '.' && ep->d_name[1] == '.' &&
ep->d_name[2] == '\0')) {
entryoffsetinblk += ep->d_reclen;
} else {
if ((err = ufs_iget(dp->i_vfs, ep->d_ino,
&tp, CRED())) != 0) {
goto out;
}
/* Drop one link from the attribute's inode, transactionally. */
TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
trans_size = (int)TOP_REMOVE_SIZE(tp));
dnlc_remove(ITOV(dp), ep->d_name);
rw_enter(&tp->i_contents, RW_WRITER);
tp->i_flag |= ICHG;
tp->i_seq++;
TRANS_INODE(tp->i_ufsvfs, tp);
tp->i_nlink--;
/* Queue the inode for deletion once its last link is gone. */
ufs_setreclaim(tp);
ITIMES_NOLOCK(tp);
rw_exit(&tp->i_contents);
VN_RELE(ITOV(tp));
entryoffsetinblk += ep->d_reclen;
TRANS_END_CSYNC(ufsvfsp, error,
issync, TOP_REMOVE, trans_size);
}
offset += ep->d_reclen;
}
if (fbp) {
fbrelse(fbp, S_OTHER);
}
out:
rw_exit(&ufsvfsp->vfs_dqrwlock);
}