#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/flock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/share.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <netinet/in.h>
#include <fs/fs_subr.h>
#include <rpcsvc/nlm_prot.h>
#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/export.h>
#include <nfs/rnode.h>
#include <nfs/lm.h>
#include "nlm_impl.h"
#define NLM_X_RECLAIM 1
#define NLM_X_BLOCKING 2
#define NLM_CANCEL_NRETRS 5
#define NLM_FLOCK_IS_SAFE(flp) \
((flp)->l_start == 0 && (flp)->l_len == 0)
static volatile uint32_t nlm_xid = 1;
static int nlm_init_fh_by_vp(vnode_t *, struct netobj *, rpcvers_t *);
static int nlm_map_status(nlm4_stats);
static int nlm_map_clnt_stat(enum clnt_stat);
static void nlm_send_siglost(pid_t);
static int nlm_frlock_getlk(struct nlm_host *, vnode_t *,
struct flock64 *, int, u_offset_t, struct netobj *, int);
static int nlm_frlock_setlk(struct nlm_host *, vnode_t *,
struct flock64 *, int, u_offset_t, struct netobj *,
struct flk_callback *, int, bool_t);
static int nlm_reclaim_lock(struct nlm_host *, vnode_t *,
struct flock64 *, int32_t);
static void nlm_init_lock(struct nlm4_lock *,
const struct flock64 *, struct netobj *,
struct nlm_owner_handle *);
static int nlm_call_lock(vnode_t *, struct flock64 *,
struct nlm_host *, struct netobj *,
struct flk_callback *, int, int);
static int nlm_call_unlock(struct flock64 *, struct nlm_host *,
struct netobj *, int);
static int nlm_call_test(struct flock64 *, struct nlm_host *,
struct netobj *, int);
static int nlm_call_cancel(struct nlm4_lockargs *,
struct nlm_host *, int);
static int nlm_local_getlk(vnode_t *, struct flock64 *, int);
static int nlm_local_setlk(vnode_t *, struct flock64 *, int);
static void nlm_local_cancelk(vnode_t *, struct flock64 *);
static void nlm_init_share(struct nlm4_share *,
const struct shrlock *, struct netobj *);
static int nlm_call_share(struct shrlock *, struct nlm_host *,
struct netobj *, int, int);
static int nlm_call_unshare(struct shrlock *, struct nlm_host *,
struct netobj *, int);
static int nlm_reclaim_share(struct nlm_host *, vnode_t *,
struct shrlock *, uint32_t);
static int nlm_local_shrlock(vnode_t *, struct shrlock *, int, int);
static void nlm_local_shrcancel(vnode_t *, struct shrlock *);
/*
 * Reclaim all locks and share reservations held against hostp after
 * the server rebooted (entered its grace period).  The whole pass is
 * repeated while the host's NSM state number keeps changing, which
 * indicates yet another reboot happened during reclamation.  Locks
 * and shares that cannot be reclaimed are cancelled locally and
 * their owners are sent SIGLOST.
 */
void
nlm_reclaim_client(struct nlm_globals *g, struct nlm_host *hostp)
{
	int32_t state;
	int error, sysid;
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	bool_t restart;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	do {
		error = 0;
		restart = FALSE;
		state = nlm_host_get_state(hostp);

		DTRACE_PROBE3(reclaim__iter, struct nlm_globals *, g,
		    struct nlm_host *, hostp, int, state);

		/* Sleeping (blocked) lock requests cannot be reclaimed. */
		nlm_host_cancel_slocks(g, hostp);

		/* Reclaim all active locks we hold against this host. */
		llp_head = llp = flk_get_active_locks(sysid, NOPID);
		while (llp != NULL) {
			error = nlm_reclaim_lock(hostp, llp->ll_vp,
			    &llp->ll_flock, state);
			if (error == 0) {
				llp = llp->ll_next;
				continue;
			} else if (error == ERESTART) {
				/* Server rebooted again; start over. */
				restart = TRUE;
				break;
			} else {
				/*
				 * The lock cannot be recovered; drop it
				 * locally and notify its owner (SIGLOST).
				 */
				nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
			}

			llp = llp->ll_next;
		}

		flk_free_locklist(llp_head);
		if (restart) {
			/* do-while condition re-reads the host state. */
			continue;
		}

		/* Now reclaim the share reservations. */
		nsp_head = nsp = nlm_get_active_shres(hostp);
		while (nsp != NULL) {
			error = nlm_reclaim_share(hostp, nsp->ns_vp,
			    nsp->ns_shr, state);
			if (error == 0) {
				nsp = nsp->ns_next;
				continue;
			} else if (error == ERESTART) {
				break;
			} else {
				/*
				 * The share cannot be recovered; forget
				 * it and cancel it locally (SIGLOST).
				 */
				nlm_shres_untrack(hostp, nsp->ns_vp,
				    nsp->ns_shr);
				nlm_local_shrcancel(nsp->ns_vp,
				    nsp->ns_shr);
			}

			nsp = nsp->ns_next;
		}

		nlm_free_shrlist(nsp_head);
	} while (state != nlm_host_get_state(hostp));
}
/*
 * Client-side advisory file locking entry point for NLM-managed NFS
 * files.  Looks up (or creates) the NLM host for the current server,
 * then dispatches F_GETLK / F_SETLK / F_SETLKW to the matching helper.
 * Any other command yields EINVAL.
 */
int
nlm_frlock(struct vnode *vp, int cmd, struct flock64 *flkp,
    int flags, u_offset_t offset, struct cred *crp,
    struct netobj *fhp, struct flk_callback *flcb, int vers)
{
	struct nlm_globals *g;
	struct nlm_host *hostp;
	const char *netid;
	servinfo_t *sv;
	mntinfo_t *mi;
	int error;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_frlock: unknown NFS netid");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	hostp = nlm_host_findcreate(g, sv->sv_hostname, netid,
	    &sv->sv_addr, NULL);
	if (hostp == NULL)
		return (ENOSYS);

	/*
	 * Purge cached attributes so that a SEEK_END-relative request
	 * sees the latest file size via convoff()/VOP_GETATTR().
	 */
	if (flkp->l_whence == SEEK_END)
		PURGE_ATTRCACHE(vp);

	if (cmd == F_GETLK) {
		error = nlm_frlock_getlk(hostp, vp, flkp, flags,
		    offset, fhp, vers);
	} else if (cmd == F_SETLK || cmd == F_SETLKW) {
		error = nlm_frlock_setlk(hostp, vp, flkp, flags,
		    offset, fhp, flcb, vers, (cmd == F_SETLKW));
		if (error == 0)
			nlm_host_monitor(g, hostp, 0);
	} else {
		error = EINVAL;
	}

	nlm_host_release(g, hostp);
	return (error);
}
/*
 * Implementation of fcntl(F_GETLK) for an NLM-managed file.
 *
 * Checks the local (cached) lock state first; only if no conflict is
 * known locally is an NLM_TEST RPC sent to the server.  On success,
 * *flkp either has l_type == F_UNLCK (no conflict) or describes the
 * conflicting lock.
 */
static int
nlm_frlock_getlk(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flkp, int flags, u_offset_t offset,
    struct netobj *fhp, int vers)
{
	struct flock64 flk0;
	int error;

	/*
	 * Check local (cached) locks first.
	 * If we find a conflict, no RPC is needed.
	 */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	error = nlm_local_getlk(vp, &flk0, flags);
	if (error != 0)
		return (error);
	if (flk0.l_type != F_UNLCK) {
		/* Found a conflicting lock locally; report it. */
		*flkp = flk0;
		return (0);
	}

	/* Not found locally; ask the server. */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	/* Normalize the range to a SEEK_SET-relative offset. */
	error = convoff(vp, &flk0, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	error = nlm_call_test(&flk0, hostp, fhp, vers);
	if (error != 0)
		return (error);

	if (flk0.l_type == F_UNLCK) {
		/* No conflict on the server either. */
		flkp->l_type = F_UNLCK;
	} else {
		/*
		 * Conflicting lock found; convert back to the caller's
		 * whence before copying the description out.
		 */
		(void) convoff(vp, &flk0, flkp->l_whence, (offset_t)offset);
		*flkp = flk0;
	}

	return (0);
}
/*
 * Implementation of fcntl(F_SETLK / F_SETLKW) for an NLM-managed
 * file.  Unlock requests are applied locally first and then sent to
 * the server; lock requests go to the server first and are cached
 * locally only after the server grants them.
 */
static int
nlm_frlock_setlk(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flkp, int flags, u_offset_t offset,
    struct netobj *fhp, struct flk_callback *flcb,
    int vers, bool_t do_block)
{
	int error, xflags;

	/* Normalize the lock range to a SEEK_SET-relative offset. */
	error = convoff(vp, flkp, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	/*
	 * Pre-v4 NLM only carries 32-bit offsets; reject a range the
	 * server cannot represent.
	 */
	if (vers < NLM4_VERS) {
		if (flkp->l_start > MAX_UOFF32 ||
		    flkp->l_start + flkp->l_len > MAX_UOFF32 + 1)
			return (EINVAL);
	}

	/* Tag the lock with our client-side sysid and pid. */
	flkp->l_sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	flkp->l_pid = curproc->p_pid;

	if (flkp->l_type == F_UNLCK) {
		/*
		 * Purge the local (cached) lock information first,
		 * then clear the remote lock.
		 */
		(void) nlm_local_setlk(vp, flkp, flags);
		error = nlm_call_unlock(flkp, hostp, fhp, vers);

		return (error);
	}

	if (!do_block) {
		/*
		 * Non-blocking request: check locally first; if a
		 * cached lock already conflicts, there is no need to
		 * bother the server.
		 *
		 * BUGFIX: nlm_local_getlk() returns 0 on success and
		 * signals a conflict via l_type != F_UNLCK, so this
		 * must test error == 0 (the former "error != 0" made
		 * the fast path dead code).
		 */
		struct flock64 flk0;

		flk0 = *flkp;
		error = nlm_local_getlk(vp, &flk0, flags);
		if (error == 0 && flk0.l_type != F_UNLCK) {
			/* Found a conflicting lock. */
			return (EAGAIN);
		}

		xflags = 0;
	} else {
		xflags = NLM_X_BLOCKING;
	}

	/* Remember this pid for lock-owner bookkeeping. */
	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_PID,
	    (char *)&curproc->p_pid, sizeof (pid_t));

	error = nlm_call_lock(vp, flkp, hostp, fhp, flcb, vers, xflags);
	if (error != 0)
		return (error);

	/*
	 * The server granted the lock; save it locally.  This should
	 * not fail, but if it does, warn and carry on -- the remote
	 * lock is already held.
	 */
	error = nlm_local_setlk(vp, flkp, flags);
	if (error != 0) {
		NLM_WARN("nlm_frlock_setlk: Failed to set local lock. "
		    "[err=%d]\n", error);
		error = 0;
	}

	return (error);
}
/*
 * Unconditionally drop every lock and share reservation held against
 * hostp (client shutdown / host-forget path).  Remote state is
 * released best-effort (RPC errors are ignored); local state is
 * always cleaned up and the owners are sent SIGLOST.
 */
void
nlm_client_cancel_all(struct nlm_globals *g, struct nlm_host *hostp)
{
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	struct netobj lm_fh;
	rpcvers_t vers;
	int error, sysid;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	nlm_host_cancel_slocks(g, hostp);

	/* Destroy all active locks. */
	llp_head = llp = flk_get_active_locks(sysid, NOPID);
	while (llp != NULL) {
		llp->ll_flock.l_type = F_UNLCK;

		error = nlm_init_fh_by_vp(llp->ll_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unlock(&llp->ll_flock, hostp,
			    &lm_fh, vers);

		nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
		llp = llp->ll_next;
	}
	flk_free_locklist(llp_head);

	/* Destroy all active share reservations. */
	nsp_head = nsp = nlm_get_active_shres(hostp);
	while (nsp != NULL) {
		error = nlm_init_fh_by_vp(nsp->ns_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unshare(nsp->ns_shr, hostp,
			    &lm_fh, vers);

		nlm_local_shrcancel(nsp->ns_vp, nsp->ns_shr);
		nlm_shres_untrack(hostp, nsp->ns_vp, nsp->ns_shr);
		nsp = nsp->ns_next;
	}
	nlm_free_shrlist(nsp_head);
}
/*
 * Decide whether the requested lock is "safe" to grant on vp.
 * Returns 1 when it is, 0 when it is not.  A partial-file lock on a
 * memory-mapped file is unsafe, and so is any lock on a file with
 * mandatory locking semantics; a failed getattr is treated as unsafe.
 */
int
nlm_safelock(vnode_t *vp, const struct flock64 *fl, cred_t *cr)
{
	rnode_t *rnp = VTOR(vp);
	struct vattr attr;

	/* A mapped file only tolerates whole-file locks. */
	if (rnp->r_mapcnt > 0 && !(fl->l_start == 0 && fl->l_len == 0))
		return (0);

	attr.va_mask = AT_MODE;
	if (VOP_GETATTR(vp, &attr, 0, cr, NULL) != 0)
		return (0);

	/* Mandatory-locking files are never safe. */
	return (MANDLOCK(vp, attr.va_mode) ? 0 : 1);
}
/*
 * Return non-zero if it is safe to map (cache) the given vnode.
 * It is unsafe when any active or sleeping NLM lock on vp covers
 * less than the whole file.
 */
int
nlm_safemap(const vnode_t *vp)
{
	struct locklist *llp, *llp_next;
	struct nlm_slock *nslp;
	struct nlm_globals *g;
	int safe = 1;

	/*
	 * Walk the active-lock list for vp.  The list is consumed
	 * (each entry's vnode hold released and the entry freed) as
	 * we go, so keep walking even after finding an unsafe lock.
	 */
	llp = flk_active_locks_for_vp(vp);
	while (llp != NULL) {
		if ((llp->ll_vp == vp) &&
		    !NLM_FLOCK_IS_SAFE(&llp->ll_flock))
			safe = 0;

		llp_next = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = llp_next;
	}
	if (!safe)
		return (safe);

	/* Then check the sleeping (blocked) lock requests on vp. */
	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(nslp, &g->nlm_slocks, nsl_link) {
		if (nslp->nsl_state == NLM_SL_BLOCKED &&
		    nslp->nsl_vp == vp &&
		    (nslp->nsl_lock.l_offset != 0 ||
		    nslp->nsl_lock.l_len != 0)) {
			/* A pending partial-file lock: not safe. */
			safe = 0;
			break;
		}
	}
	mutex_exit(&g->lock);

	return (safe);
}
/*
 * Report whether any sleeping (blocked) NLM lock request exists for
 * the given vnode.  Returns TRUE/FALSE.
 */
int
nlm_has_sleep(const vnode_t *vp)
{
	struct nlm_globals *g;
	struct nlm_slock *slp;
	int found = FALSE;

	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(slp, &g->nlm_slocks, nsl_link) {
		if (slp->nsl_vp == vp &&
		    slp->nsl_state == NLM_SL_BLOCKED) {
			found = TRUE;
			break;
		}
	}
	mutex_exit(&g->lock);

	return (found);
}
/*
 * Record a lock in the local lock manager without contacting the
 * server -- used when the lock is already known to be held remotely.
 * When hostp is NULL the host is looked up (and released again) just
 * to obtain its sysid; if no host can be determined the lock is
 * registered with sysid 0.
 */
void
nlm_register_lock_locally(struct vnode *vp, struct nlm_host *hostp,
    struct flock64 *flk, int flags, u_offset_t offset)
{
	struct nlm_globals *g = NULL;
	int sysid = 0;

	if (hostp == NULL) {
		mntinfo_t *mi;
		servinfo_t *sv;
		const char *netid;

		mi = VTOMI(vp);
		sv = mi->mi_curr_serv;
		netid = nlm_knc_to_netid(sv->sv_knconf);

		if (netid != NULL) {
			g = zone_getspecific(nlm_zone_key, curzone);
			hostp = nlm_host_findcreate(g, sv->sv_hostname,
			    netid, &sv->sv_addr, NULL);
		}
	}

	if (hostp != NULL) {
		sysid = hostp->nh_sysid | LM_SYSID_CLIENT;

		/* Only drop the reference we took ourselves. */
		if (g != NULL)
			nlm_host_release(g, hostp);
	}

	flk->l_sysid = sysid;
	(void) convoff(vp, flk, 0, (offset_t)offset);
	(void) nlm_local_setlk(vp, flk, flags);
}
/*
 * Re-acquire one lock on the server after a reboot (the reclaim
 * path).  Returns ERESTART when the host's state number changed
 * again, which tells the caller to restart the whole reclaim pass.
 */
static int
nlm_reclaim_lock(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flp, int32_t orig_state)
{
	struct netobj fh;
	rpcvers_t vers;
	int err;

	/* Another reboot invalidates this reclaim pass. */
	if (nlm_host_get_state(hostp) != orig_state)
		return (ERESTART);

	err = nlm_init_fh_by_vp(vp, &fh, &vers);
	if (err != 0)
		return (err);

	return (nlm_call_lock(vp, flp, hostp, &fh,
	    NULL, vers, NLM_X_RECLAIM));
}
/*
 * Query the local lock manager for a conflicting lock (F_GETLK
 * semantics).  Callers must have normalized fl to SEEK_SET.
 */
static int
nlm_local_getlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	int rc;

	VERIFY(fl->l_whence == SEEK_SET);
	rc = reclock(vp, fl, 0, flags, 0, NULL);
	return (rc);
}
/*
 * Apply (or remove, for F_UNLCK) a lock in the local lock manager.
 * Callers must have normalized fl to SEEK_SET.
 */
static int
nlm_local_setlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	int rc;

	VERIFY(fl->l_whence == SEEK_SET);
	rc = reclock(vp, fl, SETFLCK, flags, 0, NULL);
	return (rc);
}
/*
 * Drop an unrecoverable local lock and notify its owner with
 * SIGLOST, since the corresponding server-side state was lost.
 */
static void
nlm_local_cancelk(vnode_t *vp, struct flock64 *flp)
{
	flp->l_type = F_UNLCK;
	(void) nlm_local_setlk(vp, flp, FREAD | FWRITE);
	nlm_send_siglost(flp->l_pid);
}
/*
 * Do an NLM_LOCK call to the server for the range described by flp.
 *
 * With NLM_X_BLOCKING set in xflags, a sleeping lock is registered
 * and, if the server replies nlm4_blocked, we wait for the GRANTED
 * callback, retrying every g->retrans_tmo to cope with lost grants
 * or server reboots.  NLM_X_RECLAIM marks the request as a
 * post-reboot reclaim (which must not wait out a grace period).
 *
 * rnode->r_lkserlock is released before sleeping and re-acquired
 * (as writer) before returning, as the NFS frlock code expects.
 */
static int
nlm_call_lock(vnode_t *vp, struct flock64 *flp,
    struct nlm_host *hostp, struct netobj *fhp,
    struct flk_callback *flcb, int vers, int xflags)
{
	struct nlm4_lockargs args;
	struct nlm_owner_handle oh;
	struct nlm_globals *g;
	rnode_t *rnp = VTOR(vp);
	struct nlm_slock *nslp = NULL;
	uint32_t xid;
	int error = 0;

	bzero(&args, sizeof (args));
	g = zone_getspecific(nlm_zone_key, curzone);
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	args.reclaim = xflags & NLM_X_RECLAIM;
	args.state = g->nsm_state;
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);

	if (xflags & NLM_X_BLOCKING) {
		args.block = TRUE;
		nslp = nlm_slock_register(g, hostp, &args.alock, vp);
	}

	for (;;) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;
		enum nlm4_stats nlm_err;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0) {
			error = ENOLCK;
			goto out;
		}

		bzero(&res, sizeof (res));
		stat = nlm_lock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;	/* transient RPC failure */

			goto out;
		}

		DTRACE_PROBE1(lock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			if (args.reclaim) {
				/* Reclaims may not wait out the grace. */
				error = ENOLCK;
				goto out;
			}

			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				goto out;

			continue;
		}

		switch (nlm_err) {
		case nlm4_granted:
		case nlm4_blocked:
			error = 0;
			break;

		case nlm4_denied:
			if (nslp != NULL) {
				NLM_WARN("nlm_call_lock: got nlm4_denied for "
				    "blocking lock\n");
			}

			error = EAGAIN;
			break;

		default:
			error = nlm_map_status(nlm_err);
		}

		/*
		 * For a non-blocking lock, or a blocking lock that was
		 * not actually blocked on the server side, we're done.
		 */
		if (nslp == NULL ||
		    nlm_err != nlm4_blocked ||
		    error != 0)
			goto out;

		/*
		 * Before sleeping, disable caching on the vnode if
		 * the pending lock is not "safe" (not whole-file).
		 * BUGFIX: this must SET VNOCACHE; the previous code
		 * cleared it here, re-enabling caching in exactly the
		 * unsafe case.  The cache can be enabled again later
		 * if the lock becomes safe once activated.
		 */
		if (!NLM_FLOCK_IS_SAFE(flp)) {
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VNOCACHE;
			mutex_exit(&vp->v_lock);
		}

		/*
		 * The server should call back with a granted message
		 * when the lock succeeds.  To deal with broken
		 * servers, lost grants, or reboots we also retry on a
		 * timeout.  r_lkserlock must be dropped before we go
		 * to sleep.
		 */
		(void) flk_invoke_callbacks(flcb, FLK_BEFORE_SLEEP);
		nfs_rw_exit(&rnp->r_lkserlock);

		error = nlm_slock_wait(g, nslp, g->retrans_tmo);

		/* NFS expects r_lkserlock held as writer on return. */
		(void) nfs_rw_enter_sig(&rnp->r_lkserlock, RW_WRITER, 0);
		(void) flk_invoke_callbacks(flcb, FLK_AFTER_SLEEP);

		if (error == 0) {
			break;
		} else if (error == EINTR) {
			/* Interrupted: cancel the request on the server. */
			DTRACE_PROBE1(cancel__lock, int, error);
			(void) nlm_call_cancel(&args, hostp, vers);
			break;
		} else {
			/* Timeout: resend the lock request. */
			ASSERT(error == ETIMEDOUT);
			continue;
		}
	}

	/*
	 * If a sleeping lock failed but the vnode is now safe to
	 * cache, re-enable caching.  BUGFIX: this must CLEAR
	 * VNOCACHE; the previous code set it here, disabling caching
	 * on the safe path.
	 */
	if (error != 0 && nslp != NULL && nlm_safemap(vp)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag &= ~VNOCACHE;
		mutex_exit(&vp->v_lock);
	}

out:
	if (nslp != NULL)
		nlm_slock_unregister(g, nslp);

	return (error);
}
/*
 * Do an NLM_CANCEL call: tell the server to forget a blocked lock
 * request we gave up on (error recovery for nlm_call_lock()).
 * Cancellation is best-effort, so unlike the other NLM calls it is
 * retried only a bounded number of times.
 */
static int
nlm_call_cancel(struct nlm4_lockargs *largs,
    struct nlm_host *hostp, int vers)
{
	nlm4_cancargs cargs;
	uint32_t xid;
	int error, retries;

	bzero(&cargs, sizeof (cargs));

	/* Fresh transaction id; the lock description is copied as-is. */
	xid = atomic_inc_32_nv(&nlm_xid);
	cargs.cookie.n_len = sizeof (xid);
	cargs.cookie.n_bytes = (char *)&xid;
	cargs.block = largs->block;
	cargs.exclusive = largs->exclusive;
	cargs.alock = largs->alock;

	for (retries = 0; retries < NLM_CANCEL_NRETRS; retries++) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_cancel_rpc(&cargs, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		DTRACE_PROBE1(cancel__rloop_end, enum clnt_stat, stat);
		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;	/* transient RPC failure */

			return (error);
		}

		DTRACE_PROBE1(cancel__res, enum nlm4_stats, res.stat.stat);
		switch (res.stat.stat) {
		/*
		 * "Nothing to cancel", "server in grace period" and
		 * "cancelled" all count as a successful cancellation.
		 */
		case nlm_denied:
		case nlm4_denied_grace_period:
		case nlm4_granted:
			error = 0;
			break;

		default:
			/* Broken server implementation. */
			error = EIO;
			break;
		}

		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		break;
	}

	return (error);
}
/*
 * Do an NLM_UNLOCK call for the range described by flp, waiting out
 * a server grace period if necessary.  A server reply of nlm4_denied
 * (nothing was locked) is mapped to EINVAL; other statuses go
 * through nlm_map_status().
 */
static int
nlm_call_unlock(struct flock64 *flp, struct nlm_host *hostp,
    struct netobj *fhp, int vers)
{
	struct nlm4_unlockargs args;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_res res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_unlock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;	/* transient RPC failure */

			return (error);
		}

		DTRACE_PROBE1(unlock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			/* Server is recovering: wait and retry. */
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	/* Special cases. */
	switch (nlm_err) {
	case nlm4_denied:
		error = EINVAL;
		break;
	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}
/*
 * Do an NLM_TEST call (the RPC behind F_GETLK), waiting out a server
 * grace period if necessary.  On nlm4_granted there is no conflict
 * and flp->l_type is set to F_UNLCK; on nlm4_denied, flp is filled
 * with the conflicting holder's range, pid and type.
 */
static int
nlm_call_test(struct flock64 *flp, struct nlm_host *hostp,
    struct netobj *fhp, int vers)
{
	struct nlm4_testargs args;
	struct nlm4_holder h;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_testres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_test_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;	/* transient RPC failure */

			return (error);
		}

		DTRACE_PROBE1(test__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		/* Copy the holder out before the reply is freed. */
		bcopy(&res.stat.nlm4_testrply_u.holder, &h, sizeof (h));
		xdr_free((xdrproc_t)xdr_nlm4_testres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			/* Server is recovering: wait and retry. */
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		/* No conflicting lock. */
		flp->l_type = F_UNLCK;
		error = 0;
		break;

	case nlm4_denied:
		/* Describe the conflicting lock to the caller. */
		flp->l_start = h.l_offset;
		flp->l_len = h.l_len;
		flp->l_pid = h.svid;
		flp->l_type = (h.exclusive) ? F_WRLCK : F_RDLCK;
		flp->l_whence = SEEK_SET;
		flp->l_sysid = 0;
		error = 0;
		break;

	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}
/*
 * Fill in an nlm4_lock from a (SEEK_SET-normalized) flock64.
 * The owner handle oh is zeroed here and used as the wire "oh"
 * blob; the caller fills in oh->oh_sysid afterwards and must keep
 * both fh and oh alive for the duration of the RPC.
 */
static void
nlm_init_lock(struct nlm4_lock *lock,
    const struct flock64 *fl, struct netobj *fh,
    struct nlm_owner_handle *oh)
{
	/* Callers normalize the range before building the request. */
	VERIFY(fl->l_whence == SEEK_SET);

	bzero(oh, sizeof (*oh));
	bzero(lock, sizeof (*lock));

	lock->caller_name = uts_nodename();
	lock->svid = fl->l_pid;
	lock->l_offset = fl->l_start;
	lock->l_len = fl->l_len;
	lock->fh.n_len = fh->n_len;
	lock->fh.n_bytes = fh->n_bytes;
	lock->oh.n_len = sizeof (*oh);
	lock->oh.n_bytes = (void *)oh;
}
/*
 * Client-side entry point for share reservations (F_SHARE /
 * F_UNSHARE) on an NLM-managed file.  Unshare is applied locally
 * first and then sent to the server; share goes to the server first
 * and is cached locally only once granted.
 */
int
nlm_shrlock(struct vnode *vp, int cmd, struct shrlock *shr,
    int flags, struct netobj *fh, int vers)
{
	struct shrlock shlk;
	mntinfo_t *mi;
	servinfo_t *sv;
	const char *netid;
	struct nlm_host *host = NULL;
	int error;
	struct nlm_globals *g;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_shrlock: unknown NFS netid\n");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, sv->sv_hostname, netid,
	    &sv->sv_addr, NULL);
	if (host == NULL)
		return (ENOSYS);

	/* Tag the reservation with our sysid and pid. */
	shlk = *shr;
	shlk.s_sysid = host->nh_sysid | LM_SYSID_CLIENT;
	shlk.s_pid = curproc->p_pid;

	if (cmd == F_UNSHARE) {
		/*
		 * Purge the local (cached) share information first,
		 * then clear the remote share.
		 */
		(void) nlm_local_shrlock(vp, &shlk, cmd, flags);
		nlm_shres_untrack(host, vp, &shlk);
		error = nlm_call_unshare(&shlk, host, fh, vers);

		goto out;
	}

	/* Remember the owner for share-owner bookkeeping. */
	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_OWNER,
	    shr->s_owner, shr->s_own_len);

	error = nlm_call_share(&shlk, host, fh, vers, FALSE);
	if (error != 0)
		goto out;

	/*
	 * Save the share locally.  This should not fail, but if it
	 * does, warn and carry on: the remote share is already held.
	 */
	error = nlm_local_shrlock(vp, shr, cmd, flags);
	if (error != 0) {
		NLM_WARN("nlm_shrlock: set locally, err %d\n", error);
		error = 0;
	}

	nlm_shres_track(host, vp, &shlk);
	nlm_host_monitor(g, host, 0);

out:
	nlm_host_release(g, host);

	return (error);
}
/*
 * Re-establish one share reservation on the server after a reboot.
 * Returns ERESTART when the host's state number changed again, which
 * tells the caller to restart the whole reclaim pass.
 */
static int
nlm_reclaim_share(struct nlm_host *hostp, vnode_t *vp,
    struct shrlock *shr, uint32_t orig_state)
{
	struct netobj fh;
	rpcvers_t vers;
	int err;

	/* Another reboot invalidates this reclaim pass. */
	if (nlm_host_get_state(hostp) != orig_state)
		return (ERESTART);

	err = nlm_init_fh_by_vp(vp, &fh, &vers);
	if (err != 0)
		return (err);

	/* Re-send the share request with the reclaim flag set. */
	return (nlm_call_share(shr, hostp, &fh, vers, 1));
}
/*
 * Apply a share reservation command (share/unshare) to the local
 * (cached) share state.
 */
int
nlm_local_shrlock(vnode_t *vp, struct shrlock *shr, int cmd, int flags)
{
	return (fs_shrlock(vp, cmd, shr, flags, CRED(), NULL));
}
/*
 * Drop an unrecoverable local share reservation and notify its
 * owner with SIGLOST.
 */
static void
nlm_local_shrcancel(vnode_t *vp, struct shrlock *shr)
{
	(void) nlm_local_shrlock(vp, shr, F_UNSHARE, FREAD | FWRITE);
	nlm_send_siglost(shr->s_pid);
}
/*
 * Do an NLM_SHARE call, waiting out a server grace period unless the
 * request itself is a reclaim.  Status mapping: granted -> 0,
 * blocked/denied -> EAGAIN, nolocks/deadlock -> ENOLCK, anything
 * else -> EINVAL.
 */
static int
nlm_call_share(struct shrlock *shr, struct nlm_host *host,
    struct netobj *fh, int vers, int reclaim)
{
	struct nlm4_shareargs args;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_share(&args.share, shr, fh);

	args.reclaim = reclaim;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_shareres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(host, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_share_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(host, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;	/* transient RPC failure */

			return (error);
		}

		DTRACE_PROBE1(share__res, enum nlm4_stats, res.stat);
		nlm_err = res.stat;
		xdr_free((xdrproc_t)xdr_nlm4_shareres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			/* Reclaims may not wait out the grace period. */
			if (args.reclaim)
				return (ENOLCK);

			error = nlm_host_wait_grace(host);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		error = 0;
		break;
	case nlm4_blocked:
	case nlm4_denied:
		error = EAGAIN;
		break;
	case nlm4_denied_nolocks:
	case nlm4_deadlck:
		error = ENOLCK;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
static int
nlm_call_unshare(struct shrlock *shr, struct nlm_host *host,
struct netobj *fh, int vers)
{
struct nlm4_shareargs args;
enum nlm4_stats nlm_err;
uint32_t xid;
int error;
bzero(&args, sizeof (args));
nlm_init_share(&args.share, shr, fh);
xid = atomic_inc_32_nv(&nlm_xid);
args.cookie.n_len = sizeof (xid);
args.cookie.n_bytes = (char *)&xid;
for (;;) {
nlm_rpc_t *rpcp;
struct nlm4_shareres res;
enum clnt_stat stat;
error = nlm_host_get_rpc(host, vers, &rpcp);
if (error != 0)
return (ENOLCK);
bzero(&res, sizeof (res));
stat = nlm_unshare_rpc(&args, &res, rpcp->nr_handle, vers);
nlm_host_rele_rpc(host, rpcp);
error = nlm_map_clnt_stat(stat);
if (error != 0) {
if (error == EAGAIN)
continue;
return (error);
}
DTRACE_PROBE1(unshare__res, enum nlm4_stats, res.stat);
nlm_err = res.stat;
xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
if (nlm_err == nlm4_denied_grace_period) {
error = nlm_host_wait_grace(host);
if (error != 0)
return (error);
continue;
}
break;
}
switch (nlm_err) {
case nlm4_granted:
error = 0;
break;
case nlm4_denied:
error = EAGAIN;
break;
case nlm4_denied_nolocks:
error = ENOLCK;
break;
default:
error = EINVAL;
break;
}
return (error);
}
/*
 * Fill in an nlm4_share request from a local shrlock.  The deny and
 * access modes are translated from the fcntl F_*DNY / F_*ACC values
 * to the NLM fsm_* / fsa_* encodings; unrecognized values fall back
 * to "deny none" and "no access" respectively, as before.
 */
static void
nlm_init_share(struct nlm4_share *args,
    const struct shrlock *shr, struct netobj *fh)
{
	bzero(args, sizeof (*args));

	args->caller_name = uts_nodename();
	args->fh.n_len = fh->n_len;
	args->fh.n_bytes = fh->n_bytes;
	args->oh.n_len = shr->s_own_len;
	args->oh.n_bytes = (void *)shr->s_owner;

	/* Deny mode. */
	if (shr->s_deny == F_RDDNY)
		args->mode = fsm_DR;
	else if (shr->s_deny == F_WRDNY)
		args->mode = fsm_DW;
	else if (shr->s_deny == F_RWDNY)
		args->mode = fsm_DRW;
	else
		args->mode = fsm_DN;

	/* Access mode. */
	if (shr->s_access == F_RDACC)
		args->access = fsa_R;
	else if (shr->s_access == F_WRACC)
		args->access = fsa_W;
	else if (shr->s_access == F_RWACC)
		args->access = fsa_RW;
	else
		args->access = fsa_NONE;
}
/*
 * Build the NLM file handle and pick the NLM protocol version for
 * the given vnode based on the NFS mount version: NFSv3 uses NLMv4,
 * NFSv2 uses NLMv1.  Returns ENOSYS for any other NFS version.
 */
static int
nlm_init_fh_by_vp(vnode_t *vp, struct netobj *fh, rpcvers_t *lm_vers)
{
	mntinfo_t *mi = VTOMI(vp);

	if (mi->mi_vers == NFS_V3) {
		*lm_vers = NLM4_VERS;
		fh->n_len = VTOFH3(vp)->fh3_length;
		fh->n_bytes = (char *)&(VTOFH3(vp)->fh3_u.data);
	} else if (mi->mi_vers == NFS_VERSION) {
		*lm_vers = NLM_VERS;
		fh->n_len = sizeof (fhandle_t);
		fh->n_bytes = (char *)VTOFH(vp);
	} else {
		return (ENOSYS);
	}

	return (0);
}
/*
 * Deliver SIGLOST to the process that owned a lock or share we could
 * not recover.  A no-op if the process no longer exists.
 */
static void
nlm_send_siglost(pid_t pid)
{
	proc_t *p;

	mutex_enter(&pidlock);
	if ((p = prfind(pid)) != NULL)
		psignal(p, SIGLOST);
	mutex_exit(&pidlock);
}
/*
 * Map an RPC client status to an errno.  EAGAIN marks the transient
 * failures (timeout, program unavailable) that callers retry on.
 */
static int
nlm_map_clnt_stat(enum clnt_stat stat)
{
	if (stat == RPC_SUCCESS)
		return (0);
	if (stat == RPC_TIMEDOUT || stat == RPC_PROGUNAVAIL)
		return (EAGAIN);
	if (stat == RPC_INTR)
		return (EINTR);

	return (EINVAL);
}
/*
 * Map an NLM status code to an errno.  denied, blocked and "grace
 * period" all translate to EAGAIN (try again later); unknown codes
 * become EINVAL.
 */
static int
nlm_map_status(enum nlm4_stats stat)
{
	switch (stat) {
	case nlm4_granted:
		return (0);

	case nlm4_denied:
	case nlm4_blocked:
	case nlm4_denied_grace_period:
		return (EAGAIN);

	case nlm4_denied_nolocks:
		return (ENOLCK);

	case nlm4_deadlck:
		return (EDEADLK);

	case nlm4_rofs:
		return (EROFS);

	case nlm4_stale_fh:
		return (ESTALE);

	case nlm4_fbig:
		return (EFBIG);

	case nlm4_failed:
		return (EACCES);

	default:
		return (EINVAL);
	}
}