#ifndef _SYS_FLOCK_IMPL_H
#define _SYS_FLOCK_IMPL_H
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/user.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cred.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/share.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Directed edge of the lock dependency graph.  Each edge lives on two
 * circular doubly-linked lists, both anchored at a lock_descriptor's
 * l_edge sentinel (see IN_ADJ_INIT()):
 *   edge_adj_* - adjacency list of the vertex the edge leaves
 *   edge_in_*  - incoming-edge list of the vertex the edge enters
 * NOTE(review): direction convention (from_vertex waits on to_vertex)
 * is inferred from the field names -- confirm against the .c file.
 */
struct edge {
struct edge *edge_adj_next;	/* adjacency-list linkage */
struct edge *edge_adj_prev;
struct edge *edge_in_next;	/* incoming-edge-list linkage */
struct edge *edge_in_prev;
struct lock_descriptor *from_vertex;	/* source lock vertex */
struct lock_descriptor *to_vertex;	/* destination lock vertex */
};
typedef struct edge edge_t;
/*
 * A vertex of the lock graph: one file-lock request (active or
 * sleeping) on some vnode.
 */
struct lock_descriptor {
struct lock_descriptor *l_next;	/* active/sleeping list linkage */
struct lock_descriptor *l_prev;
struct edge l_edge;	/* sentinel anchoring this vertex's edge lists */
struct lock_descriptor *l_stack;	/* intrusive stack links (STACK_* macros) */
struct lock_descriptor *l_stack1;
struct lock_descriptor *l_dstack;	/* NOTE(review): presumably deadlock-scan stack -- confirm */
struct edge *l_sedge;	/* saved edge cursor for traversals -- TODO confirm */
int l_index;	/* NOTE(review): presumably vertex number from graph->index */
struct graph *l_graph;	/* owning graph (supplies ->mark for coloring) */
vnode_t *l_vnode;	/* vnode this lock applies to */
int l_type;	/* F_RDLCK / F_WRLCK (see BLOCKS()) */
int l_state;	/* bit mask of the *_LOCK flags below */
u_offset_t l_start;	/* first byte of the locked range */
u_offset_t l_end;	/* last byte of the range, inclusive (see OVERLAP()) */
flock64_t l_flock;	/* owner identity: l_pid/l_sysid (see SAME_OWNER()) */
int l_color;	/* traversal mark, compared against l_graph->mark */
kcondvar_t l_cv;	/* signalled by GRANT/CANCEL/INTERRUPT_WAKEUP */
int pvertex;	/* index of owner's proc_vertex in pgraph -- TODO confirm */
int l_status;	/* lifecycle state: one of FLK_*_STATE */
flk_nlm_status_t l_nlm_state;	/* NLM server state for this lock (IS_NLM_UP) */
flk_callback_t *l_callbacks;	/* callback list -- NOTE(review): semantics not visible here */
zoneid_t l_zoneid;	/* zone of the requester, presumably -- confirm */
file_t *l_ofd;	/* OFD owner handle; part of SAME_OWNER() identity */
};
typedef struct lock_descriptor lock_descriptor_t;
/*
 * One lock graph: holds the active and sleeping locks for all vnodes
 * that hash to it (see HASH_INDEX()/lock_graph[]).
 */
struct graph {
kmutex_t gp_mutex;	/* protects this graph -- NOTE(review): confirm exact scope */
struct lock_descriptor active_locks;	/* sentinel head of active list (ACTIVE_HEAD) */
struct lock_descriptor sleeping_locks;	/* sentinel head of sleeping list (SLEEPING_HEAD) */
int index;	/* NOTE(review): presumably next vertex number to assign */
int mark;	/* current coloring generation (COLOR/COLORED) */
};
typedef struct graph graph_t;
/* Values for lock_descriptor.l_status: lifecycle states (IS_* predicates). */
#define FLK_INITIAL_STATE 1
#define FLK_START_STATE 2
#define FLK_ACTIVE_STATE 3
#define FLK_SLEEPING_STATE 4
#define FLK_GRANTED_STATE 5
#define FLK_INTERRUPTED_STATE 6
#define FLK_CANCELLED_STATE 7
#define FLK_DEAD_STATE 8
/* Bit flags for lock_descriptor.l_state. */
#define ACTIVE_LOCK 0x0001
#define SLEEPING_LOCK 0x0002
#define IO_LOCK 0x0004
#define REFERENCED_LOCK 0x0008
#define QUERY_LOCK 0x0010
#define WILLING_TO_SLEEP_LOCK 0x0020
#define RECOMPUTE_LOCK 0x0040
#define RECOMPUTE_DONE 0x0080
#define BARRIER_LOCK 0x0100
#define GRANTED_LOCK 0x0200
#define CANCELLED_LOCK 0x0400
#define DELETED_LOCK 0x0800
#define INTERRUPTED_LOCK 0x1000
#define LOCKMGR_LOCK 0x2000
#define PXFS_LOCK 0x4000
#define NBMAND_LOCK 0x8000
/*
 * vnode -> lock_graph[] hash.  NOTE(review): despite its name,
 * HASH_SHIFT is used as a mask (HASH_SIZE - 1), not a shift count.
 * Fix: (vp) is now parenthesized so expression arguments group
 * correctly under the cast (previously (uintptr_t)vp bound only to
 * the first operand of an expression argument).
 */
#define HASH_SIZE 32
#define HASH_SHIFT (HASH_SIZE - 1)
#define HASH_INDEX(vp) (((uintptr_t)(vp) >> 7) & HASH_SHIFT)
/* Hash table of lock graphs; slot chosen by HASH_INDEX(vp). */
extern struct graph *lock_graph[HASH_SIZE];
/* kmem cache from which edge_t structures are allocated. */
extern struct kmem_cache *flk_edge_cache;
int flk_execute_request(lock_descriptor_t *);
void flk_cancel_sleeping_lock(lock_descriptor_t *, int);
void flk_set_state(lock_descriptor_t *, int);
graph_t *flk_get_lock_graph(vnode_t *, int);
/* flk_get_lock_graph() second argument -- presumably "create if absent". */
#define FLK_USE_GRAPH 0
#define FLK_INIT_GRAPH 1
/* Traversal parameters -- NOTE(review): usage not visible in this header. */
#define NO_COLOR 0
#define NO_CHECK_CYCLE 0
#define CHECK_CYCLE 1
/*
 * Owner identity: two locks belong to the same owner iff pid, sysid,
 * and the open-file-description pointer (l_ofd) all match.
 * Beware: both arguments are evaluated multiple times.
 */
#define SAME_OWNER(lock1, lock2) \
(((lock1)->l_flock.l_pid == (lock2)->l_flock.l_pid) && \
((lock1)->l_flock.l_sysid == (lock2)->l_flock.l_sysid) && \
((lock1)->l_ofd == (lock2)->l_ofd))
/* Vertex coloring for graph walks, keyed to the owning graph's mark. */
#define COLORED(vertex) ((vertex)->l_color == (vertex)->l_graph->mark)
#define COLOR(vertex) ((vertex)->l_color = (vertex)->l_graph->mark)
/*
 * Intrusive stack built through a caller-named link member (e.g.
 * l_stack).  Fix: the PUSH/POP expansions are now fully parenthesized
 * so they cannot mis-group when composed into larger expressions
 * (CERT PRE02-C); behavior at existing statement call sites is
 * unchanged.
 */
#define STACK_INIT(stack) ((stack) = NULL)
#define STACK_PUSH(stack, ptr, stack_link) \
((ptr)->stack_link = (stack), (stack) = (ptr))
#define STACK_POP(stack, stack_link) ((stack) = (stack)->stack_link)
#define STACK_TOP(stack) (stack)
#define STACK_EMPTY(stack) ((stack) == NULL)
/* Sentinel heads of a graph's active and sleeping lock lists. */
#define ACTIVE_HEAD(gp) (&(gp)->active_locks)
#define SLEEPING_HEAD(gp) (&(gp)->sleeping_locks)
/*
 * Point (lock) at the first active lock on vnode (vp).  Active locks
 * hang directly off vp->v_filocks; (gp) is accepted only for symmetry
 * with SET_LOCK_TO_FIRST_SLEEP_VP.
 * Fixes: (vp) is parenthesized (macro-argument hygiene) and the body
 * is wrapped in do/while (0) so the macro nests safely under if/else
 * (CERT PRE10-C).  Call sites invoking these with a trailing
 * semicolon are unaffected.
 */
#define SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp) \
do { \
	(lock) = (lock_descriptor_t *)(vp)->v_filocks; \
} while (0)
/*
 * Point (lock) at the first sleeping lock on vnode (vp), or NULL if
 * there is none: linear scan of the graph's sleeping list.
 */
#define SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp) \
do { \
	for ((lock) = SLEEPING_HEAD((gp))->l_next; \
	    ((lock) != SLEEPING_HEAD((gp)) && \
	    (lock)->l_vnode != (vp)); (lock) = (lock)->l_next) \
		; \
	(lock) = ((lock) == SLEEPING_HEAD((gp))) ? NULL : (lock); \
} while (0)
/*
 * True if the two locks' byte ranges intersect (l_end is inclusive).
 * Beware: both arguments are evaluated multiple times.
 */
#define OVERLAP(lock1, lock2) \
(((lock1)->l_start <= (lock2)->l_start && \
(lock2)->l_start <= (lock1)->l_end) || \
((lock2)->l_start <= (lock1)->l_start && \
(lock1)->l_start <= (lock2)->l_end))
/* Lifecycle predicates over l_status (see FLK_*_STATE). */
#define IS_INITIAL(lock) ((lock)->l_status == FLK_INITIAL_STATE)
#define IS_ACTIVE(lock) ((lock)->l_status == FLK_ACTIVE_STATE)
#define IS_SLEEPING(lock) ((lock)->l_status == FLK_SLEEPING_STATE)
#define IS_GRANTED(lock) ((lock)->l_status == FLK_GRANTED_STATE)
#define IS_INTERRUPTED(lock) ((lock)->l_status == FLK_INTERRUPTED_STATE)
#define IS_CANCELLED(lock) ((lock)->l_status == FLK_CANCELLED_STATE)
#define IS_DEAD(lock) ((lock)->l_status == FLK_DEAD_STATE)
/* Flag predicates over the l_state bit mask. */
#define IS_QUERY_LOCK(lock) ((lock)->l_state & QUERY_LOCK)
#define IS_RECOMPUTE(lock) ((lock)->l_state & RECOMPUTE_LOCK)
#define IS_BARRIER(lock) ((lock)->l_state & BARRIER_LOCK)
#define IS_DELETED(lock) ((lock)->l_state & DELETED_LOCK)
#define IS_REFERENCED(lock) ((lock)->l_state & REFERENCED_LOCK)
#define IS_IO_LOCK(lock) ((lock)->l_state & IO_LOCK)
#define IS_WILLING_TO_SLEEP(lock) \
((lock)->l_state & WILLING_TO_SLEEP_LOCK)
#define IS_LOCKMGR(lock) ((lock)->l_state & LOCKMGR_LOCK)
/* True if the NLM server responsible for this lock is up. */
#define IS_NLM_UP(lock) ((lock)->l_nlm_state == FLK_NLM_UP)
#define IS_PXFS(lock) ((lock)->l_state & PXFS_LOCK)
/* A lock is local iff the system part of its sysid is zero. */
#define IS_LOCAL(lock) (GETSYSID((lock)->l_flock.l_sysid) == 0)
#define IS_REMOTE(lock) (! IS_LOCAL(lock))
/* NOTE(review): unparenthesized -1; (-1) would be safer in expressions. */
#define PXFS_LOCK_BLOCKED -1
/* Notification hook for lock state transitions -- presumably cluster/PXFS. */
extern void cl_flk_state_transition_notify(lock_descriptor_t *lock,
int old_state, int new_state);
/*
 * lock1 blocks lock2 iff the owners differ, at least one of the two
 * is a write lock, and the ranges overlap.
 */
#define BLOCKS(lock1, lock2) (!SAME_OWNER((lock1), (lock2)) && \
(((lock1)->l_type == F_WRLCK) || \
((lock2)->l_type == F_WRLCK)) && \
OVERLAP((lock1), (lock2)))
/* True if lock1's range entirely contains lock2's. */
#define COVERS(lock1, lock2) \
(((lock1)->l_start <= (lock2)->l_start) && \
((lock1)->l_end >= (lock2)->l_end))
/*
 * Unlink edge (ep) from its incoming-edge list.  The list is circular
 * and doubly linked, so no head pointer is needed.
 * Fix: the statement macros below are wrapped in do/while (0) so they
 * nest safely under unbraced if/else (CERT PRE10-C); call sites that
 * invoke them with a trailing semicolon are unaffected.
 */
#define IN_LIST_REMOVE(ep) \
do { \
	(ep)->edge_in_next->edge_in_prev = (ep)->edge_in_prev; \
	(ep)->edge_in_prev->edge_in_next = (ep)->edge_in_next; \
} while (0)
/* Unlink edge (ep) from its adjacency list. */
#define ADJ_LIST_REMOVE(ep) \
do { \
	(ep)->edge_adj_next->edge_adj_prev = (ep)->edge_adj_prev; \
	(ep)->edge_adj_prev->edge_adj_next = (ep)->edge_adj_next; \
} while (0)
/*
 * True if (lock) waits on nothing (empty adjacency list) and has not
 * already been granted.
 */
#define NOT_BLOCKED(lock) \
((lock)->l_edge.edge_adj_next == &(lock)->l_edge && !IS_GRANTED(lock))
/*
 * Grant a sleeping lock and wake its waiter.  PXFS locks are handled
 * remotely, so no local cv_signal is posted for them -- NOTE(review):
 * inferred from the IS_PXFS() guard; confirm in the cluster code.
 */
#define GRANT_WAKEUP(lock) \
do { \
	flk_set_state(lock, FLK_GRANTED_STATE); \
	(lock)->l_state |= GRANTED_LOCK; \
	if (!IS_PXFS(lock)) { \
		cv_signal(&(lock)->l_cv); \
	} \
} while (0)
/* Cancel a sleeping lock and wake its waiter. */
#define CANCEL_WAKEUP(lock) \
do { \
	flk_set_state(lock, FLK_CANCELLED_STATE); \
	(lock)->l_state |= CANCELLED_LOCK; \
	if (!IS_PXFS(lock)) { \
		cv_signal(&(lock)->l_cv); \
	} \
} while (0)
/* Mark a sleeping lock interrupted and wake its waiter. */
#define INTERRUPT_WAKEUP(lock) \
do { \
	flk_set_state(lock, FLK_INTERRUPTED_STATE); \
	(lock)->l_state |= INTERRUPTED_LOCK; \
	if (!IS_PXFS(lock)) { \
		cv_signal(&(lock)->l_cv); \
	} \
} while (0)
/*
 * Take (lock) off the sleeping list and clear its sleeping flag.
 * Legal only in the states asserted below.
 * Fix: statement macros wrapped in do/while (0) so they nest safely
 * under unbraced if/else (CERT PRE10-C); semicolon-terminated call
 * sites are unaffected.
 */
#define REMOVE_SLEEP_QUEUE(lock) \
do { \
	ASSERT(IS_SLEEPING(lock) || IS_GRANTED(lock) || \
	    IS_INTERRUPTED(lock) || IS_CANCELLED(lock)); \
	(lock)->l_state &= ~SLEEPING_LOCK; \
	(lock)->l_next->l_prev = (lock)->l_prev; \
	(lock)->l_prev->l_next = (lock)->l_next; \
	(lock)->l_next = (lock)->l_prev = (lock_descriptor_t *)NULL; \
} while (0)
/* True if no other lock waits on (lock): its in-list is empty. */
#define NO_DEPENDENTS(lock) \
((lock)->l_edge.edge_in_next == &(lock)->l_edge)
/* Mark (lock) granted without signalling anyone (cf. GRANT_WAKEUP). */
#define GRANT(lock) \
do { \
	(lock)->l_state |= GRANTED_LOCK; \
	flk_set_state(lock, FLK_GRANTED_STATE); \
} while (0)
/* Edge-list accessors; HEAD() is the sentinel anchoring both lists. */
#define FIRST_IN(lock) ((lock)->l_edge.edge_in_next)
#define FIRST_ADJ(lock) ((lock)->l_edge.edge_adj_next)
#define HEAD(lock) (&(lock)->l_edge)
#define NEXT_ADJ(ep) ((ep)->edge_adj_next)
#define NEXT_IN(ep) ((ep)->edge_in_next)
/* Make both edge lists of (lock) empty: sentinel points at itself. */
#define IN_ADJ_INIT(lock) \
do { \
	(lock)->l_edge.edge_adj_next = (lock)->l_edge.edge_adj_prev = \
	    &(lock)->l_edge; \
	(lock)->l_edge.edge_in_next = (lock)->l_edge.edge_in_prev = \
	    &(lock)->l_edge; \
} while (0)
/*
 * Copy the identifying fields of lock2 into lock1.  Deliberately does
 * NOT copy list linkage, edge lists, condvar, l_status or NLM state.
 * Fix: wrapped in do/while (0) so it nests safely under unbraced
 * if/else (CERT PRE10-C); semicolon-terminated call sites unchanged.
 */
#define COPY(lock1, lock2) \
do { \
	(lock1)->l_graph = (lock2)->l_graph; \
	(lock1)->l_vnode = (lock2)->l_vnode; \
	(lock1)->l_type = (lock2)->l_type; \
	(lock1)->l_state = (lock2)->l_state; \
	(lock1)->l_start = (lock2)->l_start; \
	(lock1)->l_end = (lock2)->l_end; \
	(lock1)->l_flock = (lock2)->l_flock; \
	(lock1)->l_zoneid = (lock2)->l_zoneid; \
	(lock1)->pvertex = (lock2)->pvertex; \
} while (0)
/* Per-lock NLM state accessors. */
#define SET_NLM_STATE(lock, nlm_state) ((lock)->l_nlm_state = nlm_state)
#define GET_NLM_STATE(lock) ((lock)->l_nlm_state)
/*
 * NLM registry helpers: (nlmreg) is an array of NLM server states
 * indexed by nlmid.
 */
#define FLK_REGISTRY_IS_NLM_UNKNOWN(nlmreg, nlmid) \
((nlmreg)[nlmid] == FLK_NLM_UNKNOWN)
#define FLK_REGISTRY_IS_NLM_UP(nlmreg, nlmid) \
((nlmreg)[nlmid] == FLK_NLM_UP)
#define FLK_REGISTRY_ADD_NLMID(nlmreg, nlmid) \
((nlmreg)[nlmid] = FLK_NLM_UP)
#define FLK_REGISTRY_CHANGE_NLM_STATE(nlmreg, nlmid, state) \
((nlmreg)[nlmid] = state)
/* Request-classification values: unlock / downgrade / upgrade / no change. */
#define FLK_UNLOCK 0x1
#define FLK_DOWNGRADE 0x2
#define FLK_UPGRADE 0x3
#define FLK_STAY_SAME 0x4
/*
 * Vertex of the process graph: one (pid, sysid) lock owner (see
 * PROC_SAME_OWNER()).  NOTE(review): presumably used for deadlock
 * detection across owners -- confirm in the .c file.
 */
struct proc_vertex {
pid_t pid;	/* owner's process id */
long sysid;	/* owner's system id */
struct proc_edge *edge;	/* list of outgoing edges */
int incount;	/* count of incoming edges -- TODO confirm */
struct proc_edge *p_sedge;	/* saved edge cursor for traversals */
struct proc_vertex *p_stack;	/* intrusive stack link (STACK_* macros) */
int atime;	/* arrival stamp, compared to pgraph.mark (PROC_ARRIVED) */
int dtime;	/* departure stamp, compared to pgraph.mark (PROC_DEPARTED) */
int index;	/* slot in pgraph.proc[] -- TODO confirm */
};
typedef struct proc_vertex proc_vertex_t;
/*
 * Edge of the process graph.  refcount presumably counts how many
 * lock-graph edges this process edge summarizes -- NOTE(review):
 * confirm against the .c file.
 */
struct proc_edge {
struct proc_edge *next;	/* next edge out of the same vertex */
int refcount;
struct proc_vertex *to_proc;	/* destination process vertex */
};
typedef struct proc_edge proc_edge_t;
/* Growth increment for the pgraph.proc[] array, presumably. */
#define PROC_CHUNK 100
/*
 * The (single, global) process graph: an array of pointers to
 * process vertices.
 */
struct proc_graph {
struct proc_vertex **proc;	/* vertex pointer array */
int gcount;	/* slots allocated -- TODO confirm */
int free;	/* free slots available -- TODO confirm */
int mark;	/* generation counter for ARRIVE/DEPART stamps */
};
typedef struct proc_graph proc_graph_t;
extern struct proc_graph pgraph;
/* True if (lock) is owned by the process represented by (pvertex). */
#define PROC_SAME_OWNER(lock, pvertex) \
(((lock)->l_flock.l_pid == (pvertex)->pid) && \
((lock)->l_flock.l_sysid == (pvertex)->sysid))
/*
 * Arrival/departure stamping against the current graph mark --
 * NOTE(review): presumably depth-first-search timestamps for cycle
 * detection; confirm in the .c file.
 */
#define PROC_ARRIVE(pvertex) ((pvertex)->atime = pgraph.mark)
#define PROC_DEPART(pvertex) ((pvertex)->dtime = pgraph.mark)
#define PROC_ARRIVED(pvertex) ((pvertex)->atime == pgraph.mark)
#define PROC_DEPARTED(pvertex) ((pvertex)->dtime == pgraph.mark)
#ifdef __cplusplus
}
#endif
#endif