#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/ddidevmap.h>
#include <sys/ddi_implfuncs.h>
#include <sys/lgrp.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/vpage.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/fs/snode.h>
/*
 * Debug support: DEBUGF(level, (CE_..., fmt, ...)) prints through cmn_err()
 * when segdev_debug is at least `level`; compiled out in non-DEBUG builds.
 * Use #ifdef (not #if) so "-DDEBUG" with no value still works.
 */
#ifdef DEBUG
int segdev_debug;
#define DEBUGF(level, args) \
    do { \
        if (segdev_debug >= (level)) \
            cmn_err args; \
    } while (0)
#else
#define DEBUGF(level, args)
#endif

/* Timeout (ticks) used when scheduling a devmap context callback. */
#define CTX_TIMEOUT_VALUE 0

/*
 * dh_lock exists only on handles created with DEVMAP_ALLOW_REMAP, so it is
 * taken conditionally.  The do/while (0) wrapper makes each macro expand to
 * a single statement, safe inside unbraced if/else bodies.
 */
#define HOLD_DHP_LOCK(dhp) \
    do { \
        if ((dhp)->dh_flags & DEVMAP_ALLOW_REMAP) \
            mutex_enter(&(dhp)->dh_lock); \
    } while (0)

#define RELE_DHP_LOCK(dhp) \
    do { \
        if ((dhp)->dh_flags & DEVMAP_ALLOW_REMAP) \
            mutex_exit(&(dhp)->dh_lock); \
    } while (0)

/* Round a down/up to a multiple of s; s must be a power of two. */
#define round_down_p2(a, s) ((a) & ~((s) - 1))
#define round_up_p2(a, s) (((a) + (s) - 1) & ~((s) - 1))

/* Both the user VA and the PA are aligned on a pgsize boundary. */
#define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
    ((((uvaddr) | (paddr)) & ((pgsize) - 1)) == 0)

/* The user VA and the PA share the same offset within a pgsize page. */
#define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
    ((((uvaddr) ^ (paddr)) & ((pgsize) - 1)) == 0)

/* Bytes required to hold n struct vpage entries. */
#define vpgtob(n) ((n) * sizeof (struct vpage))

/* Common vnode behind a specfs device vnode. */
#define VTOCVP(vp) (VTOS(vp)->s_commonvp)
/* Global list of driver-shared mapping contexts; protected by devmapctx_lock. */
static struct devmap_ctx *devmapctx_list = NULL;

/* Global list of softlock bookkeeping structures; protected by devmap_slock. */
static struct devmap_softlock *devmap_slist = NULL;

/*
 * Shared backing for UMEM_TRASH mappings: faults on a trash mapping resolve
 * to trashpp (see segdev_faultpage).  trashvp is presumably the matching
 * trash vnode used elsewhere in this file -- not referenced in this chunk.
 */
static struct vnode trashvp;
static struct page *trashpp;

/* Arena backing devmap_umem_alloc_np() non-pageable kernel allocations. */
static vmem_t *umem_np_arena;

/* Sentinel cookie value marking a handle that maps raw device memory. */
#define DEVMAP_DEVMEM_COOKIE ((ddi_umem_cookie_t)0x1)

/* Classify a cookie: device memory, pmem, or kernel pageable memory. */
#define cookie_is_devmem(c) \
((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
#define cookie_is_pmem(c) \
((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
/* kpmem: a real ddi_umem_cookie whose type is KMEM_PAGEABLE. */
#define cookie_is_kpmem(c) (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
((c)->type == KMEM_PAGEABLE))

/* Same classification applied to a devmap handle via its dh_cookie. */
#define dhp_is_devmem(dhp) \
(cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
#define dhp_is_pmem(dhp) \
(cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
#define dhp_is_kpmem(dhp) \
(cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
/* Forward declarations: seg_ops entry points implemented by this driver. */
static int segdev_dup(struct seg *, struct seg *);
static int segdev_unmap(struct seg *, caddr_t, size_t);
static void segdev_free(struct seg *);
static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
enum fault_type, enum seg_rw);
static faultcode_t segdev_faulta(struct seg *, caddr_t);
static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
static void segdev_badop(void);
static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
ulong_t *, size_t);
static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
static u_offset_t segdev_getoffset(struct seg *, caddr_t);
static int segdev_gettype(struct seg *, caddr_t);
static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
static void segdev_dump(struct seg *);
static int segdev_pagelock(struct seg *, caddr_t, size_t,
struct page ***, enum lock_type, enum seg_rw);
static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
static lgrp_mem_policy_info_t *segdev_getpolicy(struct seg *, caddr_t);
static int segdev_capable(struct seg *, segcapability_t);

/*
 * Segment operations vector for device segments.  Entries follow the
 * seg_ops layout; the two cast entries are presumably the kluster
 * (unsupported -> badop) and swapout (not implemented -> NULL) slots --
 * verify against the seg_ops definition in <vm/seg.h>.
 */
struct seg_ops segdev_ops = {
segdev_dup,
segdev_unmap,
segdev_free,
segdev_fault,
segdev_faulta,
segdev_setprot,
segdev_checkprot,
(int (*)())segdev_badop,	/* unsupported op: panics via segdev_badop */
(size_t (*)(struct seg *))NULL,	/* op not implemented */
segdev_sync,
segdev_incore,
segdev_lockop,
segdev_getprot,
segdev_getoffset,
segdev_gettype,
segdev_getvp,
segdev_advise,
segdev_dump,
segdev_pagelock,
segdev_setpagesize,
segdev_getmemid,
segdev_getpolicy,
segdev_capable,
seg_inherit_notsup
};

/* Internal helpers for fault handling and devmap handle management. */
static struct segdev_data *sdp_alloc(void);
static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
size_t, enum seg_rw);
static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
static void devmap_softlock_rele(devmap_handle_t *);
static void devmap_ctx_rele(devmap_handle_t *);
static void devmap_ctxto(void *);
static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
caddr_t addr);
static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
ulong_t *opfn, ulong_t *pagesize);
static void free_devmap_handle(devmap_handle_t *dhp);
static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
struct seg *newseg);
static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
offset_t off, size_t len, uint_t flags);
static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
caddr_t addr, size_t *llen, caddr_t *laddr);
static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
static void *devmap_umem_alloc_np(size_t size, size_t flags);
static void devmap_umem_free_np(void *addr, size_t size);
static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
static int devmap_softlock_enter(struct devmap_softlock *, size_t,
enum fault_type);
static void devmap_softlock_exit(struct devmap_softlock *, size_t,
enum fault_type);

/* Locks protecting the devmapctx_list and devmap_slist global lists. */
static kmutex_t devmapctx_lock;
static kmutex_t devmap_slock;
/*
 * Find or create the devmap context shared by all mappings of device
 * `dev` registered under the same context id.  Takes a reference on the
 * returned context; released via devmap_ctx_rele().
 */
static struct devmap_ctx *
devmap_ctxinit(dev_t dev, ulong_t id)
{
struct devmap_ctx *devctx;
struct devmap_ctx *tmp;
dev_info_t *dip;
/* Preallocate while sleeping is still allowed, before taking the lock. */
tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
mutex_enter(&devmapctx_lock);
/*
 * Translate dev_t to its dev_info pointer; the hold is dropped at once
 * because dip is used only as a search/identity key below.
 */
dip = e_ddi_hold_devi_by_dev(dev, 0);
ASSERT(dip != NULL);
ddi_release_devi(dip);
/* Look for an existing (dip, id) context on the global list. */
for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
if ((devctx->dip == dip) && (devctx->id == id))
break;
if (devctx == NULL) {
/* Not found: initialize the preallocated entry and link it in. */
devctx = tmp;
devctx->dip = dip;
devctx->id = id;
mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
devctx->next = devmapctx_list;
devmapctx_list = devctx;
} else
/* Found: the preallocated structure is not needed. */
kmem_free(tmp, sizeof (struct devmap_ctx));
/* Reference count is protected by the per-context lock. */
mutex_enter(&devctx->lock);
devctx->refcnt++;
mutex_exit(&devctx->lock);
mutex_exit(&devmapctx_lock);
return (devctx);
}
/*
 * Timeout callback for a devmap context: the context's time slice on the
 * CPU has expired, so clear the ownership/timeout state and wake one
 * thread waiting to use the context.
 */
static void
devmap_ctxto(void *data)
{
    struct devmap_ctx *ctxp = (struct devmap_ctx *)data;

    TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
        "devmap_ctxto:timeout expired, devctx=%p", (void *)ctxp);

    mutex_enter(&ctxp->lock);
    /* No timeout is outstanding any more and nobody owns the context. */
    ctxp->timeout = 0;
    ctxp->oncpu = 0;
    cv_signal(&ctxp->cv);
    mutex_exit(&ctxp->lock);
}
/*
 * Create a device segment from the segdev_crargs passed through argsp.
 * Sets up the HAT range, the per-segment private data, links any devmap
 * handle chain back to the segment, and registers the mapping with the
 * device's common vnode.  Returns 0 or the VOP_ADDMAP error.
 */
int
segdev_create(struct seg **segpp, void *argsp)
{
struct seg *seg = *segpp;
struct segdev_data *sdp;
struct segdev_crargs *a = (struct segdev_crargs *)argsp;
devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
int error;
/* Segment creation requires the address space write lock. */
ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
/* Tell the HAT about the new mapping range. */
hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
sdp = sdp_alloc();
sdp->mapfunc = a->mapfunc;
sdp->offset = a->offset;
sdp->prot = a->prot;
sdp->maxprot = a->maxprot;
sdp->type = a->type;
sdp->pageprot = 0;
sdp->softlockcnt = 0;
sdp->vpage = NULL;
/*
 * A devmap handle chain and a mmap entry point (mapfunc, called via
 * cdev_mmap at fault time) are mutually exclusive: keep the handles
 * only when the driver has no mapfunc.
 */
if (sdp->mapfunc == NULL)
sdp->devmap_data = dhp;
else
sdp->devmap_data = dhp = NULL;
sdp->hat_flags = a->hat_flags;
sdp->hat_attr = a->hat_attr;
/* Only HAT_LOAD_NOCONSIST may be passed through by the caller. */
ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
/* Hold the device's character vnode for the life of the segment. */
sdp->vp = specfind(a->dev, VCHR);
ASSERT(sdp->vp != NULL);
seg->s_ops = &segdev_ops;
seg->s_data = sdp;
/* Point every handle in the chain back at its owning segment. */
while (dhp != NULL) {
dhp->dh_seg = seg;
dhp = dhp->dh_next;
}
/* Register the mapping against the common spec vnode. */
error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
seg->s_as, seg->s_base, seg->s_size,
sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
if (error != 0) {
/*
 * Undo the HAT setup.  The handle chain is detached here but not
 * freed -- NOTE(review): presumably the caller owns and frees it
 * on failure; verify against the devmap setup path.
 */
sdp->devmap_data = NULL;
hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
HAT_UNLOAD_UNMAP);
} else {
/*
 * Mappings reporting neither MAP_SHARED nor MAP_PRIVATE do not
 * count toward reserved VM, so back the charge out of a_resvsize.
 */
if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
MAP_PRIVATE)) == 0) {
seg->s_as->a_resvsize -= seg->s_size;
}
}
return (error);
}
/*
 * Allocate and initialize a zeroed segdev private-data structure with
 * its reader/writer lock ready for use.
 */
static struct segdev_data *
sdp_alloc(void)
{
    struct segdev_data *new_sdp;

    new_sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
    rw_init(&new_sdp->lock, NULL, RW_DEFAULT, NULL);

    return (new_sdp);
}
/*
 * Duplicate `seg` into `newseg` (fork path): clone the private data,
 * per-page protections, and the devmap handle chain, then register the
 * new mapping with the device vnode.  Returns 0 or an error from
 * devmap_handle_dup()/VOP_ADDMAP().
 */
static int
segdev_dup(struct seg *seg, struct seg *newseg)
{
struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
struct segdev_data *newsdp;
devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
size_t npages;
int ret;
TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
"segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
(void *)dhp, (void *)seg));
ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
newsdp = sdp_alloc();
newseg->s_ops = seg->s_ops;
newseg->s_data = (void *)newsdp;
/* Extra vnode reference for the new segment's copy. */
VN_HOLD(sdp->vp);
newsdp->vp = sdp->vp;
newsdp->mapfunc = sdp->mapfunc;
newsdp->offset = sdp->offset;
newsdp->pageprot = sdp->pageprot;
newsdp->prot = sdp->prot;
newsdp->maxprot = sdp->maxprot;
newsdp->type = sdp->type;
newsdp->hat_attr = sdp->hat_attr;
newsdp->hat_flags = sdp->hat_flags;
/* The child starts with no softlocked pages of its own. */
newsdp->softlockcnt = 0;
npages = seg_pages(newseg);
/* Copy per-page protection array if the parent tracked one. */
if (sdp->vpage != NULL) {
size_t nbytes = vpgtob(npages);
newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
bcopy(sdp->vpage, newsdp->vpage, nbytes);
} else
newsdp->vpage = NULL;
/* Duplicate the devmap handle chain, invoking driver dup callbacks. */
if (dhp != NULL) {
ret = devmap_handle_dup(dhp,
(devmap_handle_t **)&newsdp->devmap_data, newseg);
if (ret != 0) {
TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
"segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
ret, (void *)dhp, (void *)seg);
DEBUGF(1, (CE_CONT,
"segdev_dup: ret %x dhp %p seg %p\n",
ret, (void *)dhp, (void *)seg));
return (ret);
}
}
return (VOP_ADDMAP(VTOCVP(newsdp->vp),
newsdp->offset, newseg->s_as,
newseg->s_base, newseg->s_size, newsdp->prot,
newsdp->maxprot, sdp->type, CRED(), NULL));
}
/*
 * Duplicate a devmap handle chain for `newseg`, storing the head of the
 * new chain in *new_dhp.  For each handle: copy it, re-acquire softlock
 * and context references, re-init the remap lock when applicable, and
 * give the driver a chance to duplicate its private data.
 */
static int
devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
struct seg *newseg)
{
devmap_handle_t *newdhp_save = NULL;
devmap_handle_t *newdhp = NULL;
struct devmap_callback_ctl *callbackops;
while (dhp != NULL) {
newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
/* Snapshot the source handle under its remap lock (if any). */
HOLD_DHP_LOCK(dhp);
bcopy(dhp, newdhp, sizeof (devmap_handle_t));
RELE_DHP_LOCK(dhp);
newdhp->dh_seg = newseg;
newdhp->dh_next = NULL;
/* Append to the new chain (or start it). */
if (newdhp_save != NULL)
newdhp_save->dh_next = newdhp;
else
*new_dhp = newdhp;
newdhp_save = newdhp;
callbackops = &newdhp->dh_callbackops;
/*
 * The devmap_access callback pointer doubles as the id under
 * which softlock/context state is shared across mappings.
 */
if (dhp->dh_softlock != NULL)
newdhp->dh_softlock = devmap_softlock_init(
newdhp->dh_dev,
(ulong_t)callbackops->devmap_access);
if (dhp->dh_ctx != NULL)
newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
(ulong_t)callbackops->devmap_access);
/* Remappable handles get their own (fresh) dh_lock. */
if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
newdhp->dh_flags |= DEVMAP_LOCK_INITED;
}
/* Let the driver duplicate its per-mapping private data. */
if (callbackops->devmap_dup != NULL) {
int ret;
ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
(devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
if (ret != 0) {
/*
 * Driver refused the dup: disable the unmap callback on
 * the (partially built) copy so teardown does not call
 * into the driver for a mapping it never acknowledged.
 */
DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
"newdhp %p dhp %p\n", (void *)newdhp,
(void *)dhp));
callbackops->devmap_unmap = NULL;
return (ret);
}
}
dhp = dhp->dh_next;
}
return (0);
}
/*
 * Unmap [addr, addr + len) from a device segment.  Handles four shapes:
 * the whole segment (free it), a head trim, a tail trim, or a hole in
 * the middle (split into two segments).  Any devmap handle chain is
 * trimmed/split to match, with driver devmap_unmap callbacks fired for
 * the ranges that go away.  Returns 0, or EAGAIN while pages are
 * softlocked.
 */
static int
segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
{
register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
register struct segdev_data *nsdp;
register struct seg *nseg;
register size_t opages;		/* old segment size in pages */
register size_t npages;		/* remaining size in pages */
register size_t dpages;		/* pages being unmapped */
register size_t nbytes;
devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
devmap_handle_t *dhpp;
devmap_handle_t *newdhp;
struct devmap_callback_ctl *callbackops;
caddr_t nbase;
offset_t off;
ulong_t nsize;
size_t mlen, sz;
TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
"segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
(void *)dhp, (void *)seg, (void *)addr, len);
DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
(void *)dhp, (void *)seg, (void *)addr, len));
ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
/* Cannot unmap while any page in the segment is softlocked. */
if ((sz = sdp->softlockcnt) > 0) {
TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
"segdev_unmap:error softlockcnt = %ld", sz);
DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
return (EAGAIN);
}
/* The range must be page aligned and inside the segment. */
if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
(len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
panic("segdev_unmap");
if (dhp != NULL) {
devmap_handle_t *tdhp;
/*
 * If any handle overlapping the range uses large pages, unload
 * translations handle-by-handle over whole handles (presumably so
 * a large mapping is never partially unloaded); otherwise one
 * hat_unload of the exact range suffices.
 */
dhpp = tdhp = devmap_find_handle(dhp, addr);
while (tdhp != NULL) {
if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
break;
}
tdhp = tdhp->dh_next;
}
if (tdhp != NULL) {
/* found large page mapping in the overlap */
size_t slen = len;
size_t mlen;
size_t soff;
soff = (ulong_t)(addr - dhpp->dh_uvaddr);
while (slen != 0) {
mlen = MIN(slen, (dhpp->dh_len - soff));
hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
dhpp->dh_len, HAT_UNLOAD_UNMAP);
dhpp = dhpp->dh_next;
ASSERT(slen >= mlen);
slen -= mlen;
soff = 0;
}
} else
hat_unload(seg->s_as->a_hat, addr, len,
HAT_UNLOAD_UNMAP);
} else {
/* No devmap handles: plain unload of the range. */
hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
}
/* Inform the vnode of the unmapped range. */
off = sdp->offset + (offset_t)(addr - seg->s_base);
ASSERT(sdp->vp != NULL);
(void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
/* Case 1: entire segment goes away. */
if (addr == seg->s_base && len == seg->s_size) {
seg_free(seg);
return (0);
}
opages = seg_pages(seg);
dpages = btop(len);
npages = opages - dpages;
/* Case 2: trim from the front of the segment. */
if (addr == seg->s_base) {
if (sdp->vpage != NULL) {
register struct vpage *ovpage;
ovpage = sdp->vpage;	/* keep pointer to vpage */
nbytes = vpgtob(npages);
sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
bcopy(&ovpage[dpages], sdp->vpage, nbytes);
kmem_free(ovpage, vpgtob(opages));
}
if (dhp != NULL)
devmap_handle_unmap_head(dhp, len);
sdp->offset += (offset_t)len;
seg->s_base += len;
seg->s_size -= len;
return (0);
}
/* Case 3: trim from the end of the segment. */
if (addr + len == seg->s_base + seg->s_size) {
if (sdp->vpage != NULL) {
register struct vpage *ovpage;
ovpage = sdp->vpage;	/* keep pointer to vpage */
nbytes = vpgtob(npages);
sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
bcopy(ovpage, sdp->vpage, nbytes);
kmem_free(ovpage, vpgtob(opages));
}
seg->s_size -= len;
if (dhp != NULL)
devmap_handle_unmap_tail(dhp, addr);
return (0);
}
/*
 * Case 4: hole in the middle.  Shrink the existing segment to the part
 * below the hole and create a new segment (nseg) for the part above it.
 */
nbase = addr + len;				/* new seg base */
nsize = (seg->s_base + seg->s_size) - nbase;	/* new seg size */
seg->s_size = addr - seg->s_base;		/* shrink old seg */
nseg = seg_alloc(seg->s_as, nbase, nsize);
if (nseg == NULL)
panic("segdev_unmap seg_alloc");
TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
"segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
(void *)seg, (void *)nseg));
nsdp = sdp_alloc();
nseg->s_ops = seg->s_ops;
nseg->s_data = (void *)nsdp;
/* Extra vnode hold for the new segment. */
VN_HOLD(sdp->vp);
nsdp->mapfunc = sdp->mapfunc;
nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
nsdp->vp = sdp->vp;
nsdp->pageprot = sdp->pageprot;
nsdp->prot = sdp->prot;
nsdp->maxprot = sdp->maxprot;
nsdp->type = sdp->type;
nsdp->hat_attr = sdp->hat_attr;
nsdp->hat_flags = sdp->hat_flags;
nsdp->softlockcnt = 0;
/* Split the per-page protection array across the two segments. */
if (sdp->vpage != NULL) {
register size_t nnbytes;
register size_t nnpages;
register struct vpage *ovpage;
ovpage = sdp->vpage;		/* keep pointer to vpage */
npages = seg_pages(seg);	/* seg has shrunken */
nbytes = vpgtob(npages);
nnpages = seg_pages(nseg);
nnbytes = vpgtob(nnpages);
sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
bcopy(ovpage, sdp->vpage, nbytes);
nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
/* skip the dpages in the middle of the old array */
bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
kmem_free(ovpage, vpgtob(opages));
} else
nsdp->vpage = NULL;
if (dhp == NULL) {
nsdp->devmap_data = NULL;
return (0);
}
/*
 * Walk the handle chain and sort each handle into one of six positions
 * relative to the hole [addr, addr + len).  Handles below the hole stay
 * on the old segment; handles above it move to nseg; handles straddling
 * a boundary are trimmed (or split) with the driver notified.
 */
while (dhp != NULL) {
callbackops = &dhp->dh_callbackops;
TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
"segdev_unmap: dhp=%p addr=%p", dhp, addr);
DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
(void *)dhp, (void *)addr,
(void *)dhp->dh_uvaddr, dhp->dh_len));
if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
/* Handle ends exactly at the hole: sever the chain here. */
dhpp = dhp->dh_next;
dhp->dh_next = NULL;
dhp = dhpp;
} else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
/* Handle entirely below the hole: stays with old seg. */
dhp = dhp->dh_next;
} else if (addr > dhp->dh_uvaddr &&
(addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
/*
 * Hole strictly inside this handle: split it.  dhp keeps
 * the part below the hole; newdhp takes the part above and
 * becomes the head of nseg's chain.
 */
newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
HOLD_DHP_LOCK(dhp);
bcopy(dhp, newdhp, sizeof (devmap_handle_t));
RELE_DHP_LOCK(dhp);
newdhp->dh_seg = nseg;
newdhp->dh_next = dhp->dh_next;
if (dhp->dh_softlock != NULL)
newdhp->dh_softlock = devmap_softlock_init(
newdhp->dh_dev,
(ulong_t)callbackops->devmap_access);
if (dhp->dh_ctx != NULL)
newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
(ulong_t)callbackops->devmap_access);
if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
mutex_init(&newdhp->dh_lock,
NULL, MUTEX_DEFAULT, NULL);
}
if (callbackops->devmap_unmap != NULL)
(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
off, len, dhp, &dhp->dh_pvtp,
newdhp, &newdhp->dh_pvtp);
/* Advance newdhp past the hole and its own head portion. */
mlen = len + (addr - dhp->dh_uvaddr);
devmap_handle_reduce_len(newdhp, mlen);
nsdp->devmap_data = newdhp;
/* Trim dhp to the part below the hole and sever the chain. */
dhp->dh_len = addr - dhp->dh_uvaddr;
dhpp = dhp->dh_next;
dhp->dh_next = NULL;
dhp = dhpp;
} else if ((addr > dhp->dh_uvaddr) &&
((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
/*
 * Hole starts inside the handle and runs past its end: the
 * tail [addr, end-of-handle) is unmapped; dhp keeps the head
 * and stays with the old segment.
 */
mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
if (callbackops->devmap_unmap != NULL)
(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
off, mlen, (devmap_cookie_t *)dhp,
&dhp->dh_pvtp, NULL, NULL);
dhp->dh_len = addr - dhp->dh_uvaddr;
dhpp = dhp->dh_next;
dhp->dh_next = NULL;
dhp = dhpp;
/* Remaining handles (if any) belong to the new segment. */
nsdp->devmap_data = dhp;
} else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
/* Handle entirely inside the hole: unmap and free it. */
dhp->dh_seg = nseg;
nsdp->devmap_data = dhp;
dhp = devmap_handle_unmap(dhp);
nsdp->devmap_data = dhp; /* XXX redundant? */
} else if (((addr + len) > dhp->dh_uvaddr) &&
((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
/*
 * Hole covers the head of the handle: unmap the head and
 * keep the tail on the new segment.
 */
mlen = addr + len - dhp->dh_uvaddr;
if (callbackops->devmap_unmap != NULL)
(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
dhp->dh_uoff, mlen, NULL,
NULL, dhp, &dhp->dh_pvtp);
devmap_handle_reduce_len(dhp, mlen);
nsdp->devmap_data = dhp;
dhp->dh_seg = nseg;
dhp = dhp->dh_next;
} else {
/* Handle entirely above the hole: move to the new segment. */
dhp->dh_seg = nseg;
dhp = dhp->dh_next;
}
}
return (0);
}
/*
 * Shrink a devmap handle by `len` bytes from the FRONT of its mapping:
 * advance the user address and offsets, and adjust the cookie-specific
 * backing pointer (pfn for device memory, kernel VA for umem) so the
 * handle still describes its remaining range.
 */
static void
devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
{
struct ddi_umem_cookie *cp;
struct devmap_pmem_cookie *pcp;
/* Must leave a non-empty, page-aligned remainder. */
ASSERT(len < dhp->dh_len);
ASSERT((len & PAGEOFFSET) == 0);
dhp->dh_len -= len;
dhp->dh_uoff += (offset_t)len;
dhp->dh_roff += (offset_t)len;
dhp->dh_uvaddr += len;
/* The cookie fields may be remapped concurrently; take dh_lock. */
HOLD_DHP_LOCK(dhp);
cp = dhp->dh_cookie;
if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
if (cookie_is_devmem(cp)) {
/* Device memory: advance the base page frame number. */
dhp->dh_pfn += btop(len);
} else if (cookie_is_pmem(cp)) {
/* pmem: dh_roff (already advanced) indexes dp_pparray. */
pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
dhp->dh_roff < ptob(pcp->dp_npages));
} else {
/* umem: advance the kernel virtual address, sanity-checked. */
ASSERT(dhp->dh_roff < cp->size);
ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
dhp->dh_cvaddr < (cp->cvaddr + cp->size));
ASSERT((dhp->dh_cvaddr + len) <=
(cp->cvaddr + cp->size));
dhp->dh_cvaddr += len;
}
}
RELE_DHP_LOCK(dhp);
}
/*
 * Unmap and free one devmap handle: fire the driver's unmap callback for
 * the whole handle, unlink it from its segment's chain, release its
 * softlock/context references and remap lock, and free it.  Returns the
 * next handle on the chain (NULL at the end).  Assumes dhp is currently
 * linked on its segment's devmap_data chain.
 */
static devmap_handle_t *
devmap_handle_unmap(devmap_handle_t *dhp)
{
struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;
ASSERT(dhp != NULL);
/* Notify the driver that the whole handle range is going away. */
if (callbackops->devmap_unmap != NULL) {
(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
dhp->dh_len, NULL, NULL, NULL, NULL);
}
/* Unlink dhp from the segment's handle chain. */
if (dhpp == dhp) {	/* removing the head: update the segment */
sdp->devmap_data = dhp->dh_next;
} else {
while (dhpp->dh_next != dhp) {
dhpp = dhpp->dh_next;
}
dhpp->dh_next = dhp->dh_next;
}
dhpp = dhp->dh_next;	/* return value: the successor handle */
/* Drop shared-state references before freeing. */
if (dhp->dh_softlock != NULL)
devmap_softlock_rele(dhp);
if (dhp->dh_ctx != NULL)
devmap_ctx_rele(dhp);
if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
mutex_destroy(&dhp->dh_lock);
}
kmem_free(dhp, sizeof (devmap_handle_t));
return (dhpp);
}
/*
 * Unmap `len` bytes from the head of a handle chain: free every handle
 * wholly inside the range, then front-trim the first surviving handle.
 * NOTE(review): assumes len is strictly less than the chain's total
 * length (a full unmap is handled by the caller before reaching here);
 * otherwise the loop would dereference a NULL dhp -- verify callers.
 */
static void
devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
{
struct devmap_callback_ctl *callbackops;
/* Free whole handles covered by the range. */
while (len >= dhp->dh_len) {
len -= dhp->dh_len;
dhp = devmap_handle_unmap(dhp);
}
if (len != 0) {	/* partial unmap at the head of the next handle */
callbackops = &dhp->dh_callbackops;
if (callbackops->devmap_unmap != NULL)
(*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
devmap_handle_reduce_len(dhp, len);
}
}
/*
 * Unmap from `addr` through the end of the segment's handle chain:
 * tail-trim the handle containing addr (notifying the driver), then free
 * every handle after it.
 * NOTE(review): maplen is computed against the dh_uvaddr of the handle
 * passed in (dhp), while the trim is applied to the handle found to
 * contain addr (dhph).  These coincide only when the caller passes the
 * containing handle, or when addr lies within the first handle of the
 * chain -- verify against the segdev_unmap tail-trim call site.
 */
static void
devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
{
register struct seg *seg = dhp->dh_seg;
register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
struct devmap_callback_ctl *callbackops;
register devmap_handle_t *dhpp;
size_t maplen;
ulong_t off;
size_t len;
maplen = (size_t)(addr - dhp->dh_uvaddr);	/* bytes to keep */
dhph = devmap_find_handle(dhph, addr);
while (dhph != NULL) {
if (maplen == 0) {
/* Everything from here on is fully unmapped. */
dhph = devmap_handle_unmap(dhph);
} else {
/* First handle: keep maplen bytes, unmap the rest. */
callbackops = &dhph->dh_callbackops;
len = dhph->dh_len - maplen;
off = (ulong_t)sdp->offset + (addr - seg->s_base);
if (callbackops->devmap_unmap != NULL)
(*callbackops->devmap_unmap)(dhph,
dhph->dh_pvtp, off, len,
(devmap_cookie_t *)dhph,
&dhph->dh_pvtp, NULL, NULL);
dhph->dh_len = maplen;
maplen = 0;
/* Detach the remainder of the chain before freeing it. */
dhpp = dhph->dh_next;
dhph->dh_next = NULL;
dhph = dhpp;
}
}
}
/*
 * Destroy a device segment: tear down its entire devmap handle chain,
 * drop the vnode reference taken at creation, free the per-page
 * protection array (if any), and release the private data.  The caller
 * holds the address space write lock.
 */
static void
segdev_free(struct seg *seg)
{
    struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
    devmap_handle_t *handle = (devmap_handle_t *)sdp->devmap_data;

    TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
        "segdev_free: dhp=%p seg=%p", (void *)handle, (void *)seg);
    DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
        (void *)handle, (void *)seg));

    ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));

    /* Unmap and free each handle; devmap_handle_unmap returns the next. */
    while (handle != NULL)
        handle = devmap_handle_unmap(handle);

    VN_RELE(sdp->vp);

    if (sdp->vpage != NULL)
        kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));

    rw_destroy(&sdp->lock);
    kmem_free(sdp, sizeof (*sdp));
}
/*
 * Free an entire devmap handle chain without firing driver callbacks:
 * destroy each handle's remap lock (if initialized), drop its softlock
 * and context references, and return the memory.
 */
static void
free_devmap_handle(devmap_handle_t *dhp)
{
    devmap_handle_t *next;

    for (; dhp != NULL; dhp = next) {
        next = dhp->dh_next;

        if (dhp->dh_flags & DEVMAP_LOCK_INITED)
            mutex_destroy(&dhp->dh_lock);
        if (dhp->dh_softlock != NULL)
            devmap_softlock_rele(dhp);
        if (dhp->dh_ctx != NULL)
            devmap_ctx_rele(dhp);

        kmem_free(dhp, sizeof (devmap_handle_t));
    }
}
/*
 * Take `npages` references on a kernel-pageable (kpmem) cookie's lock
 * count.  The first reference softlocks the whole backing object into
 * the kernel address space; later callers just bump the count.  Returns
 * 0 or a faultcode (ENOMEM if the 32-bit count would overflow).
 */
static faultcode_t
acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
{
    int ret = 0;

    ASSERT(cookie_is_kpmem(cookie));

    mutex_enter(&cookie->lock);
    if (npages > UINT32_MAX - cookie->locked) {
        /* Reference count would overflow. */
        ret = FC_MAKE_ERR(ENOMEM);
    } else {
        /* First locker faults the whole object in and pins it. */
        if (cookie->locked == 0)
            ret = as_fault(kas.a_hat, &kas, cookie->cvaddr,
                cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
        if (ret == 0)
            cookie->locked += npages;
    }
    mutex_exit(&cookie->lock);

    return (ret);
}
/*
 * Drop `npages` references on a kpmem cookie's lock count; when the
 * count reaches zero, soft-unlock the whole backing object from the
 * kernel address space.
 */
static void
release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
{
    mutex_enter(&cookie->lock);
    ASSERT(cookie_is_kpmem(cookie));
    ASSERT(cookie->locked >= npages);

    cookie->locked -= (uint_t)npages;
    if (cookie->locked == 0) {
        /* Last reference: unpin the object; failure is fatal. */
        if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
            cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE) != 0)
            panic("segdev releasing kpmem lock %p", (void *)cookie);
    }
    mutex_exit(&cookie->lock);
}
/*
 * Serialize softlocks against in-progress faults on a shared softlock
 * structure.  Sign convention on slock->softlocked: negative means pages
 * are softlocked (-softlocked pages held); positive means faults are in
 * progress.  A softlocker waits until nothing is outstanding; a faulter
 * (F_INVAL/F_PROT) waits only while pages are softlocked.  Returns 0 or
 * EINTR if the wait was interrupted by a signal.
 */
static int devmap_softlock_enter(
struct devmap_softlock *slock,
size_t npages,
enum fault_type type)
{
if (npages == 0)
return (0);
mutex_enter(&(slock->lock));
switch (type) {
case F_SOFTLOCK :
/* Wait for all activity (faults or softlocks) to drain. */
while (slock->softlocked) {
if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
/* signal pending */
mutex_exit(&(slock->lock));
return (EINTR);
}
}
/* Going negative marks these pages as softlocked. */
slock->softlocked -= npages;
break;
case F_INVAL :
case F_PROT :
/* Wait only while pages are softlocked. */
while (slock->softlocked < 0)
if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
/* signal pending */
mutex_exit(&(slock->lock));
return (EINTR);
}
/* Positive count tracks in-progress faults. */
slock->softlocked += npages;
break;
default:
ASSERT(0);
}
mutex_exit(&(slock->lock));
return (0);
}
/*
 * Undo a devmap_softlock_enter(): move slock->softlocked back toward
 * zero (up for a released softlock, down for a completed fault) and wake
 * waiters when all activity has drained.  A NULL slock is a no-op so
 * callers need not special-case handles without a softlock.
 */
static void devmap_softlock_exit(
struct devmap_softlock *slock,
size_t npages,
enum fault_type type)
{
if (slock == NULL)
return;
mutex_enter(&(slock->lock));
switch (type) {
case F_SOFTLOCK :
/* softlocked is negative while pages are held; add them back. */
ASSERT(-slock->softlocked >= npages);
slock->softlocked += npages;
if (slock->softlocked == 0)
cv_signal(&slock->cv);
break;
case F_INVAL :
case F_PROT:
/* softlocked is positive while faults are in progress. */
ASSERT(slock->softlocked >= npages);
slock->softlocked -= npages;
if (slock->softlocked == 0)
cv_signal(&slock->cv);
break;
default:
ASSERT(0);
}
mutex_exit(&(slock->lock));
}
/*
 * Release a previously softlocked range [addr, addr + len): unlock the
 * HAT translations, release per-handle kpmem and softlock bookkeeping,
 * drop the segment's softlock count, and wake any thread waiting to
 * unmap the address space once the count reaches zero.
 */
static void
segdev_softunlock(
struct hat *hat,		/* the hat */
struct seg *seg,		/* seg_dev of interest */
caddr_t addr,			/* base address of range */
size_t len,			/* number of bytes */
enum seg_rw rw)			/* type of access at fault */
{
struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
"segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
dhp_head, sdp, addr, len);
DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
"addr %p len %lx\n",
(void *)dhp_head, sdp->softlockcnt, (void *)addr, len));
hat_unlock(hat, addr, len);
if (dhp_head != NULL) {
devmap_handle_t *dhp;
size_t mlen;
size_t tlen = len;
ulong_t off;
dhp = devmap_find_handle(dhp_head, addr);
ASSERT(dhp != NULL);
/* Walk the handles covering [addr, addr + len). */
off = (ulong_t)(addr - dhp->dh_uvaddr);
while (tlen != 0) {
mlen = MIN(tlen, (dhp->dh_len - off));
/* kpmem handles: drop the per-cookie lock count. */
if (dhp_is_kpmem(dhp)) {
release_kpmem_lock(
(struct ddi_umem_cookie *)dhp->dh_cookie,
btopr(mlen));
}
/*
 * Handles with a devmap_access callback took the shared
 * softlock in segdev_fault(); release it here.
 */
if (dhp->dh_callbackops.devmap_access != NULL) {
devmap_softlock_exit(dhp->dh_softlock,
btopr(mlen), F_SOFTLOCK);
}
tlen -= mlen;
dhp = dhp->dh_next;
off = 0;
}
}
/* softlockcnt is protected by freemem_lock. */
mutex_enter(&freemem_lock);
ASSERT(sdp->softlockcnt >= btopr(len));
sdp->softlockcnt -= btopr(len);
mutex_exit(&freemem_lock);
if (sdp->softlockcnt == 0) {
/*
 * All softlocks are gone; wake anyone blocked in as_unmap()
 * waiting for this segment to become unmappable.  Re-check
 * under a_contents to close the race with the waiter.
 */
if (AS_ISUNMAPWAIT(seg->s_as)) {
mutex_enter(&seg->s_as->a_contents);
if (AS_ISUNMAPWAIT(seg->s_as)) {
AS_CLRUNMAPWAIT(seg->s_as);
cv_broadcast(&seg->s_as->a_cv);
}
mutex_exit(&seg->s_as->a_contents);
}
}
}
/*
 * Handle a fault on a single page at `addr`: check protections, derive
 * the backing pfn (from the driver's mmap entry point or from the
 * handle's cookie), and load the translation.  Returns 0, FC_PROT on a
 * protection violation, or FC_MAKE_ERR(EFAULT) if no pfn can be found.
 */
static faultcode_t
segdev_faultpage(
struct hat *hat,		/* the hat */
struct seg *seg,		/* seg_dev of interest */
caddr_t addr,			/* address in as */
struct vpage *vpage,		/* per-page prot (may be NULL) */
enum fault_type type,		/* type of fault */
enum seg_rw rw,			/* type of access at fault */
devmap_handle_t *dhp)		/* devmap handle covering addr, or NULL */
{
struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
uint_t prot;
pfn_t pfnum = PFN_INVALID;
u_offset_t offset;
uint_t hat_flags;
dev_info_t *dip;
TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
"segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
(void *)dhp, (void *)seg, (void *)addr));
/*
 * With per-page protections, validate the access against this page's
 * vpage entry; otherwise against the segment-wide protection.
 */
if (sdp->pageprot) {
uint_t protchk;
switch (rw) {
case S_READ:
protchk = PROT_READ;
break;
case S_WRITE:
protchk = PROT_WRITE;
break;
case S_EXEC:
protchk = PROT_EXEC;
break;
case S_OTHER:
default:
protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
break;
}
prot = VPP_PROT(vpage);
if ((prot & protchk) == 0)
return (FC_PROT);	/* access violation */
} else {
prot = sdp->prot;
}
/* Account the softlock before loading a locked translation. */
if (type == F_SOFTLOCK) {
mutex_enter(&freemem_lock);
sdp->softlockcnt++;
mutex_exit(&freemem_lock);
}
hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
if (dhp == NULL) {
/* No devmap handle: ask the driver's mmap(9E) entry for the pfn. */
ASSERT(sdp->devmap_data == NULL);
pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
(off_t)offset, prot);
prot |= sdp->hat_attr;
} else {
ulong_t off;
struct ddi_umem_cookie *cp;
struct devmap_pmem_cookie *pcp;
/* The handle must be the one covering addr. */
ASSERT(dhp == devmap_find_handle(
(devmap_handle_t *)sdp->devmap_data, addr));
off = addr - dhp->dh_uvaddr;
/* Derive the pfn from the handle's backing cookie. */
cp = dhp->dh_cookie;
if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
/* Mapping invalidated (e.g. mid-remap): no pfn. */
pfnum = PFN_INVALID;
} else if (cookie_is_devmem(cp)) {
/* Raw device memory: linear from the base pfn. */
pfnum = dhp->dh_pfn + btop(off);
} else if (cookie_is_pmem(cp)) {
/* pmem: look up the page in the cookie's page array. */
pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
dhp->dh_roff < ptob(pcp->dp_npages));
pfnum = page_pptonum(
pcp->dp_pparray[btop(off + dhp->dh_roff)]);
} else {
/* umem cookie: sanity-check the range, then by type. */
ASSERT(dhp->dh_roff < cp->size);
ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
dhp->dh_cvaddr < (cp->cvaddr + cp->size));
ASSERT((dhp->dh_cvaddr + off) <=
(cp->cvaddr + cp->size));
ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
(cp->cvaddr + cp->size));
switch (cp->type) {
case UMEM_LOCKED :
/* Locked user memory: page array or owner's hat. */
if (cp->pparray != NULL) {
ASSERT((dhp->dh_roff &
PAGEOFFSET) == 0);
pfnum = page_pptonum(
cp->pparray[btop(off +
dhp->dh_roff)]);
} else {
pfnum = hat_getpfnum(
((proc_t *)cp->procp)->p_as->a_hat,
cp->cvaddr + off);
}
break;
case UMEM_TRASH :
/* Trash mapping: every page maps the trash page. */
pfnum = page_pptonum(trashpp);
break;
case KMEM_PAGEABLE:
case KMEM_NON_PAGEABLE:
/* Kernel memory: look up in the kernel hat. */
pfnum = hat_getpfnum(kas.a_hat,
dhp->dh_cvaddr + off);
break;
default :
pfnum = PFN_INVALID;
break;
}
}
prot |= dhp->dh_hat_attr;
}
if (pfnum == PFN_INVALID) {
return (FC_MAKE_ERR(EFAULT));
}
TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
"segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
pfnum, pf_is_memory(pfnum), prot, hat_flags);
DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
"prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
/*
 * Real memory, or any devmap-backed page, is loaded directly; other
 * device pages go through the DDI framework's map-fault path.
 */
if (pf_is_memory(pfnum) || (dhp != NULL)) {
hat_devload(hat, addr, PAGESIZE, pfnum,
prot, hat_flags | sdp->hat_flags);
return (0);
}
dip = VTOS(VTOCVP(sdp->vp))->s_dip;
ASSERT(dip);
if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
(uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
return (FC_MAKE_ERR(EFAULT));
}
return (0);
}
/*
 * Segment fault entry point.  Without devmap handles the fault goes
 * straight to segdev_faultpages().  With handles, a softlock/fault is a
 * two-pass protocol: pass one walks the affected handles to validate the
 * request and count the pages that need softlock serialization; pass two
 * services the fault handle-by-handle, preferring the driver's
 * devmap_access callback when registered.  On error any pages already
 * softlocked are unwound.
 */
static faultcode_t
segdev_fault(
struct hat *hat,		/* the hat */
struct seg *seg,		/* the seg_dev of interest */
caddr_t addr,			/* the address of the fault */
size_t len,			/* the length of the range */
enum fault_type type,		/* type of fault */
enum seg_rw rw)			/* type of access at fault */
{
struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
devmap_handle_t *dhp;
struct devmap_softlock *slock = NULL;
ulong_t slpage = 0;		/* pages entered into the softlock */
ulong_t off;
caddr_t maddr = addr;
int err;
int err_is_faultcode = 0;	/* err already encoded as faultcode_t */
TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
"segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
(void *)dhp_head, (void *)seg, (void *)addr, len, type);
DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
"addr %p len %lx type %x\n",
(void *)dhp_head, (void *)seg, (void *)addr, len, type));
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
/* No devmap handles: service the fault directly. */
if (dhp_head == NULL)
return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
return (FC_NOMAP);
if (type == F_PROT) {
/* Protection faults are not serviced for device segments. */
return (FC_PROT);
}
if (type != F_SOFTUNLOCK) {
/*
 * Pass one: validate the request across all affected handles and
 * count the pages (slpage) to enter into the shared softlock.
 */
devmap_handle_t *dhpp = dhp;
size_t slen = len;
off = (ulong_t)(addr - dhpp->dh_uvaddr);
while (slen != 0) {
size_t mlen;
/* Softlocking a remappable handle is not supported. */
if ((type == F_SOFTLOCK) &&
(dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
return (FC_NOSUPPORT);
}
mlen = MIN(slen, (dhpp->dh_len - off));
if (dhpp->dh_callbackops.devmap_access) {
size_t llen;
caddr_t laddr;
/*
 * Large-page handles round the range out to the large
 * page size, so count the rounded length.
 */
HOLD_DHP_LOCK(dhpp);
if ((sdp->pageprot == 0) &&
(dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
devmap_get_large_pgsize(dhpp,
mlen, maddr, &llen, &laddr);
} else {
llen = mlen;
}
RELE_DHP_LOCK(dhpp);
slpage += btopr(llen);
slock = dhpp->dh_softlock;
}
maddr += mlen;
ASSERT(slen >= mlen);
slen -= mlen;
dhpp = dhpp->dh_next;
off = 0;
}
/* Serialize against other softlocks/faults on this device. */
if (err = devmap_softlock_enter(slock, slpage, type))
return (FC_MAKE_ERR(err));
}
/* Pass two: service the fault one handle-segment at a time. */
maddr = addr;
off = (ulong_t)(addr - dhp->dh_uvaddr);
while (len != 0) {
size_t llen;
size_t mlen;
caddr_t laddr;
mlen = MIN(len, (dhp->dh_len - off));
HOLD_DHP_LOCK(dhp);
if ((sdp->pageprot == 0) &&
(dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
/* Round out to the large page boundary. */
devmap_get_large_pgsize(dhp, mlen, maddr,
&llen, &laddr);
ASSERT(maddr == addr || laddr == maddr);
} else {
llen = mlen;
laddr = maddr;
}
if (dhp->dh_callbackops.devmap_access != NULL) {
offset_t aoff;
aoff = sdp->offset + (offset_t)(laddr - seg->s_base);
/*
 * The driver handles the access itself; it returns an errno,
 * not a faultcode.
 */
RELE_DHP_LOCK(dhp);
err = (*dhp->dh_callbackops.devmap_access)(
dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
} else {
/* Default handler returns a faultcode_t directly. */
err = segdev_faultpages(hat, seg, laddr, llen,
type, rw, dhp);
err_is_faultcode = 1;
RELE_DHP_LOCK(dhp);
}
if (err) {
if ((type == F_SOFTLOCK) && (maddr > addr)) {
/*
 * Unwind: release the pages already softlocked in this
 * call, and drop the remaining softlock reservation.
 */
size_t done = (size_t)(maddr - addr);
(void) segdev_fault(hat, seg, addr, done,
F_SOFTUNLOCK, S_OTHER);
ASSERT(slpage >= btopr(done));
devmap_softlock_exit(slock,
slpage - btopr(done), type);
} else {
devmap_softlock_exit(slock, slpage, type);
}
if (err_is_faultcode)
return (err);
return (FC_MAKE_ERR(err));
}
maddr += mlen;
ASSERT(len >= mlen);
len -= mlen;
dhp = dhp->dh_next;
off = 0;
/* The handles covering the range must be contiguous. */
ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
}
/* Faults (unlike softlocks) release their serialization now. */
if ((type == F_INVAL) || (type == F_PROT))
devmap_softlock_exit(slock, slpage, type);
return (0);
}
/*
 * segdev_faultpages: resolve a fault over [addr, addr + len) in a segdev
 * segment.  Validates protections, then either loads the whole range with
 * a single hat_devload() (device-memory fast path) or faults it in page
 * by page via segdev_faultpage().  Returns 0 or an FC_* fault code.
 */
static faultcode_t
segdev_faultpages(
	struct hat *hat,		/* hat to load translations into */
	struct seg *seg,		/* segment being faulted */
	caddr_t addr,			/* start of faulting range */
	size_t len,			/* length of faulting range */
	enum fault_type type,		/* F_INVAL/F_SOFTLOCK/F_SOFTUNLOCK/F_PROT */
	enum seg_rw rw,			/* access that caused the fault */
	devmap_handle_t *dhp)		/* covering devmap handle, or NULL */
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	register caddr_t a;
	struct vpage *vpage;
	struct ddi_umem_cookie *kpmem_cookie = NULL;
	int err;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
	    "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
	    (void *)dhp, (void *)seg, (void *)addr, len);
	DEBUGF(5, (CE_CONT, "segdev_faultpages: "
	    "dhp %p seg %p addr %p len %lx\n",
	    (void *)dhp, (void *)seg, (void *)addr, len));

	/* Protection faults can never be satisfied here. */
	if (type == F_PROT) {
		return (FC_PROT);
	}

	/* Softunlock is handled entirely by segdev_softunlock(). */
	if (type == F_SOFTUNLOCK) {
		segdev_softunlock(hat, seg, addr, len, rw);
		return (0);
	}

	/*
	 * For pageable kernel memory, take the kpmem lock up front so the
	 * backing pages cannot go away while translations are loaded.
	 */
	if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
		kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
		if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
			return (err);
	}

	rw_enter(&sdp->lock, RW_READER);

	/*
	 * With uniform (segment-wide) protections, validate the access
	 * once here instead of per page.
	 */
	if (sdp->pageprot == 0) {
		uint_t protchk;

		switch (rw) {
		case S_READ:
			protchk = PROT_READ;
			break;
		case S_WRITE:
			protchk = PROT_WRITE;
			break;
		case S_EXEC:
			protchk = PROT_EXEC;
			break;
		case S_OTHER:
		default:
			protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
			break;
		}

		if ((sdp->prot & protchk) == 0) {
			rw_exit(&sdp->lock);
			/* Undo the kpmem hold taken above. */
			if (kpmem_cookie != NULL) {
				release_kpmem_lock(kpmem_cookie, btopr(len));
			}
			return (FC_PROT);
		}
	}

	/*
	 * Fast path: uniform protections over true device memory can be
	 * loaded with a single hat_devload() call.
	 */
	if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
		pfn_t pfnum;
		uint_t hat_flags;

		/* A remapped-away (invalid) mapping cannot be faulted in. */
		if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
			rw_exit(&sdp->lock);
			return (FC_NOMAP);
		}

		/* Account the pages being softlocked. */
		if (type == F_SOFTLOCK) {
			mutex_enter(&freemem_lock);
			sdp->softlockcnt += btopr(len);
			mutex_exit(&freemem_lock);
		}

		hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
		pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
		ASSERT(!pf_is_memory(pfnum));
		hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
		    hat_flags | sdp->hat_flags);
		rw_exit(&sdp->lock);
		return (0);
	}

	/* Slow path: fault each page individually. */
	if (sdp->vpage == NULL)
		vpage = NULL;
	else
		vpage = &sdp->vpage[seg_page(seg, addr)];

	for (a = addr; a < addr + len; a += PAGESIZE) {
		if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
			break;
		}
		if (vpage != NULL)
			vpage++;
	}

	rw_exit(&sdp->lock);

	/*
	 * On a partial softlock failure, unlock whatever was locked and
	 * drop the kpmem hold on the pages that were never loaded.
	 */
	if (err && (type == F_SOFTLOCK)) {
		size_t done = (size_t)(a - addr);	/* bytes faulted so far */

		if (done > 0) {
			segdev_softunlock(hat, seg, addr, done, S_OTHER);
		}
		if (kpmem_cookie != NULL) {
			ASSERT(len >= done);
			release_kpmem_lock(kpmem_cookie, btopr(len - done));
		}
	} else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
		/* Non-softlock faults do not retain the kpmem hold. */
		release_kpmem_lock(kpmem_cookie, btopr(len));
	}
	return (err);
}
/*
 * segdev_faulta: asynchronous fault-ahead.  Device mappings have nothing
 * to fault ahead, so this is a no-op that reports success.
 */
static faultcode_t
segdev_faulta(struct seg *seg, caddr_t addr)
{
	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
	    "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return (0);
}
/*
 * segdev_setprot: change protections for [addr, addr + len).
 * Fails with EAGAIN while softlocks are outstanding, and with EACCES if
 * the request exceeds the maximum protections of any covering devmap
 * handle (or of the segment when there are no handles).  Updates either
 * the segment-wide protection or the per-page vpage array, then fixes up
 * the hat translations.
 */
static int
segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	register devmap_handle_t *dhp;
	register struct vpage *vp, *evp;
	devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
	ulong_t off;
	size_t mlen, sz;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
	    "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
	    (void *)seg, (void *)addr, len, prot);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Cannot change protections while pages are softlocked. */
	if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
		TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
		    "segdev_setprot:error softlockcnt=%lx", sz);
		DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
		return (EAGAIN);
	}

	if (dhp_head != NULL) {
		/* Check prot against dh_maxprot of every handle in range. */
		if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
			return (EINVAL);

		off = (ulong_t)(addr - dhp->dh_uvaddr);
		mlen = len;
		while (dhp) {
			if ((dhp->dh_maxprot & prot) != prot)
				return (EACCES);

			/* Advance to the next handle if the range spills over. */
			if (mlen > (dhp->dh_len - off)) {
				mlen -= dhp->dh_len - off;
				dhp = dhp->dh_next;
				off = 0;
			} else
				break;
		}
	} else {
		if ((sdp->maxprot & prot) != prot)
			return (EACCES);
	}

	rw_enter(&sdp->lock, RW_WRITER);
	if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
		/* Whole segment with uniform protections: store and go. */
		if (sdp->prot == prot) {
			rw_exit(&sdp->lock);
			return (0);		/* nothing changed */
		}
		sdp->prot = (uchar_t)prot;
	} else {
		/* Partial range: switch to per-page protections. */
		sdp->pageprot = 1;
		if (sdp->vpage == NULL) {
			/* Lazily allocate vpage[], seeded with current prot. */
			sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
			    KM_SLEEP);
			evp = &sdp->vpage[seg_pages(seg)];
			for (vp = sdp->vpage; vp < evp; vp++)
				VPP_SETPROT(vp, sdp->prot);
		}
		evp = &sdp->vpage[seg_page(seg, addr + len)];
		for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
			VPP_SETPROT(vp, prot);
	}
	rw_exit(&sdp->lock);

	if (dhp_head != NULL) {
		devmap_handle_t *tdhp;

		/* Does any covering handle use large pages? */
		dhp = tdhp = devmap_find_handle(dhp_head, addr);
		while (tdhp != NULL) {
			if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
				break;
			}
			tdhp = tdhp->dh_next;
		}
		if (tdhp) {
			size_t slen = len;
			size_t mlen;
			size_t soff;

			/*
			 * Unload each affected handle in full so that no
			 * large-page translation is left with stale
			 * protections; they fault back in as needed.
			 */
			soff = (ulong_t)(addr - dhp->dh_uvaddr);
			while (slen != 0) {
				mlen = MIN(slen, (dhp->dh_len - soff));
				hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
				    dhp->dh_len, HAT_UNLOAD);
				dhp = dhp->dh_next;
				ASSERT(slen >= mlen);
				slen -= mlen;
				soff = 0;
			}
			return (0);
		}
	}

	if ((prot & ~PROT_USER) == PROT_NONE) {
		/* No access remains: remove the translations. */
		hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
	} else {
		/* Change existing translations in place. */
		hat_chgprot(seg->s_as->a_hat, addr, len, prot);
	}
	return (0);
}
/*
 * segdev_checkprot: verify that every page of [addr, addr + len) grants
 * the requested protections.  Returns 0 on success, EACCES otherwise.
 */
static int
segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	int error = 0;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
	    "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
	    (void *)seg, (void *)addr, len, prot);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	rw_enter(&sdp->lock, RW_READER);
	if (sdp->pageprot == 0) {
		/* Uniform protections: one check covers the whole range. */
		if ((sdp->prot & prot) != prot)
			error = EACCES;
	} else {
		/* Per-page protections: every page must grant prot. */
		struct vpage *cur = &sdp->vpage[seg_page(seg, addr)];
		struct vpage *end = &sdp->vpage[seg_page(seg, addr + len)];

		while (cur < end) {
			if ((VPP_PROT(cur) & prot) != prot) {
				error = EACCES;
				break;
			}
			cur++;
		}
	}
	rw_exit(&sdp->lock);
	return (error);
}
/*
 * segdev_getprot: report the protections of each page in
 * [addr, addr + len) into protv[].  Always succeeds.
 */
static int
segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
	size_t npages;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
	    "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
	    (void *)seg, (void *)addr, len, (void *)protv);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Number of pages covered, inclusive of a partial last page. */
	npages = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
	if (npages != 0) {
		size_t i;

		rw_enter(&sdp->lock, RW_READER);
		if (sdp->pageprot == 0) {
			/* Uniform protections: replicate the segment prot. */
			for (i = 0; i < npages; i++)
				protv[i] = sdp->prot;
		} else {
			/* Per-page protections: copy from the vpage array. */
			size_t base = seg_page(seg, addr);

			for (i = 0; i < npages; i++)
				protv[i] = VPP_PROT(&sdp->vpage[base + i]);
		}
		rw_exit(&sdp->lock);
	}
	return (0);
}
/*
 * segdev_getoffset: return the device offset corresponding to addr,
 * i.e. the segment's base offset plus the distance into the segment.
 */
static u_offset_t
segdev_getoffset(register struct seg *seg, caddr_t addr)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
	    "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return ((u_offset_t)sdp->offset + (addr - seg->s_base));
}
/*
 * segdev_gettype: return the segment's mapping type (sdp->type,
 * as recorded at segment creation).  addr is unused.
 */
static int
segdev_gettype(register struct seg *seg, caddr_t addr)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
	    "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return (sdp->type);
}
/*
 * segdev_getvp: return the common vnode backing this device segment
 * (the common snode vnode, via VTOCVP).  addr is unused.
 */
static int
segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
	    "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	*vpp = VTOCVP(sdp->vp);
	return (0);
}
/*
 * segdev_badop: placeholder for segment operations that must never be
 * invoked on a segdev segment; panics if reached.
 */
static void
segdev_badop(void)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
	    "segdev_badop:start");
	panic("segdev_badop");
}
/*
 * segdev_sync: device memory has no backing store to synchronize with,
 * so this is a no-op that reports success.
 */
static int
segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return (0);
}
/*
 * segdev_incore: report residency of [addr, addr + len).  Device pages
 * are always considered "in core", so every entry of vec is set to 1 and
 * the page-rounded length is returned.
 */
static size_t
segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t resid;
	size_t reported = 0;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/* Round the length up to a whole number of pages. */
	resid = (len + PAGEOFFSET) & PAGEMASK;
	while (resid != 0) {
		*vec++ = 1;
		resid -= PAGESIZE;
		reported += PAGESIZE;
	}
	return (reported);
}
/*
 * segdev_lockop: lock/unlock request.  Device pages are effectively
 * always locked in place, so this is a no-op that reports success.
 */
static int
segdev_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return (0);
}
/*
 * segdev_advise: madvise-style hints are not meaningful for device
 * mappings; accept and ignore them.
 */
static int
segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
	return (0);
}
/*
 * segdev_dump: nothing from a device segment is included in a crash
 * dump, so this segment op is intentionally empty.
 */
static void
segdev_dump(struct seg *seg __unused)
{
}
/*
 * ddi_segmap_setup: default segmap support for drivers that implement
 * only mmap(9E).  Validates every page of the requested range through
 * the driver's mmap entry point, then creates a segdev segment over it.
 * Returns 0 or an errno.
 */
int
ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
    const ddi_device_acc_attr_t *accattrp, uint_t rnumber)
{
	struct segdev_crargs dev_a;
	int (*mapfunc)(dev_t dev, off_t off, int prot);
	uint_t hat_attr;
	pfn_t pfn;
	int error, i;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
	    "ddi_segmap_setup:start");

	/* The driver must provide a real mmap(9E) entry point. */
	if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
		return (ENODEV);

	/* Only MAP_SHARED mappings are supported here. */
	if ((flags & MAP_TYPE) != MAP_SHARED)
		return (EINVAL);

	/* Translate the device access attributes into hat attributes. */
	if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
		return (ENXIO);

	if (len == 0)
		return (ENXIO);

	/*
	 * Verify each page of the range is mappable.  The first pfn is
	 * kept: it seeds the address-selection alignment below.
	 */
	pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset, maxprot);
	if (pfn == PFN_INVALID)
		return (ENXIO);

	for (i = PAGESIZE; i < len; i += PAGESIZE) {
		if (cdev_mmap(mapfunc, dev, offset + i, maxprot) == PFN_INVALID)
			return (ENXIO);
	}

	as_rangelock(as);
	/* Pick (or validate) the user address. */
	error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		return (error);
	}

	dev_a.mapfunc = mapfunc;
	dev_a.dev = dev;
	dev_a.offset = (offset_t)offset;
	dev_a.type = flags & MAP_TYPE;
	dev_a.prot = (uchar_t)prot;
	dev_a.maxprot = (uchar_t)maxprot;
	dev_a.hat_attr = hat_attr;
	dev_a.hat_flags = 0;
	dev_a.devmap_data = NULL;

	error = as_map(as, *addrp, len, segdev_create, &dev_a);
	as_rangeunlock(as);
	return (error);
}
/*
 * segdev_pagelock: page locking is not supported for device segments.
 */
static int
segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
	    "segdev_pagelock:start");
	return (ENOTSUP);
}
/*
 * segdev_setpagesize: explicit page-size selection is not supported
 * for device segments.
 */
static int
segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
    uint_t szc)
{
	return (ENOTSUP);
}
/*
 * devmap_device: choose a user address (unless MAP_FIXED) and create the
 * segdev segment for a devmap handle chain.  Address selection is aligned
 * to the backing of the largest handle so large-page mappings remain
 * possible.  Returns 0 or an errno.
 */
static int
devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
    offset_t off, size_t len, uint_t flags)
{
	devmap_handle_t *rdhp, *maxdhp;
	struct segdev_crargs dev_a;
	int err;
	uint_t maxprot = PROT_ALL;
	offset_t offset = 0;
	pfn_t pfn;
	struct devmap_pmem_cookie *pcp;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
	    "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
	    (void *)dhp, (void *)addr, off, len);
	DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
	    (void *)dhp, (void *)addr, off, len));

	as_rangelock(as);
	if ((flags & MAP_FIXED) == 0) {
		offset_t aligned_off;

		/* Find the handle with the largest length in the chain. */
		rdhp = maxdhp = dhp;
		while (rdhp != NULL) {
			maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
			    maxdhp : rdhp;
			rdhp = rdhp->dh_next;
			/*
			 * NOTE(review): this reads dhp->dh_maxprot (the
			 * chain head) every iteration, not rdhp's.  It is
			 * harmless today because maxprot already starts at
			 * PROT_ALL, but it looks like rdhp was intended —
			 * confirm before changing.
			 */
			maxprot |= dhp->dh_maxprot;
		}
		offset = maxdhp->dh_uoff - dhp->dh_uoff;

		/* Derive the physical alignment of maxdhp's backing. */
		if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
			aligned_off = (offset_t)0;
		} else if (dhp_is_devmem(maxdhp)) {
			aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
		} else if (dhp_is_pmem(maxdhp)) {
			pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
			pfn = page_pptonum(
			    pcp->dp_pparray[btop(maxdhp->dh_roff)]);
			aligned_off = (offset_t)ptob(pfn) - offset;
		} else {
			aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
			    offset;
		}

		/* Ask the VM system for a suitably aligned address. */
		map_addr(addr, len, aligned_off, 1, flags);
		if (*addr == NULL) {
			as_rangeunlock(as);
			return (ENOMEM);
		}
	} else {
		/* MAP_FIXED: clear any existing mappings at the address. */
		(void) as_unmap(as, *addr, len);
	}

	dev_a.mapfunc = NULL;
	dev_a.dev = dhp->dh_dev;
	dev_a.type = flags & MAP_TYPE;
	dev_a.offset = off;
	dev_a.maxprot = maxprot;
	dev_a.prot = dhp->dh_prot;
	dev_a.hat_flags = 0;
	dev_a.hat_attr = 0;
	dev_a.devmap_data = (void *)dhp;

	err = as_map(as, *addr, len, segdev_create, &dev_a);
	as_rangeunlock(as);
	return (err);
}
/*
 * devmap_do_ctxmgt: run a driver's context-management callback with
 * optional context hysteresis.  When the handle has a timeout and the
 * machine is MP, only one thread at a time may hold the device context
 * (devctx->oncpu); others block on devctx->cv.  On success with a
 * timeout, ownership is released later by the devmap_ctxto timeout.
 * Returns DDI_SUCCESS or FC_HWERR.
 */
int
devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
    size_t, uint_t, uint_t))
{
	register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct devmap_ctx *devctx;
	int do_timeout = 0;
	int ret;

#ifdef lint
	pvtp = pvtp;
#endif

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
	    "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
	    (void *)dhp, off, len);
	DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
	    (void *)dhp, off, len));

	/* A context-management callback is mandatory here. */
	if (ctxmgt == NULL)
		return (FC_HWERR);

	devctx = dhp->dh_ctx;

	/*
	 * Hysteresis only applies with a positive timeout on an MP
	 * system; on UP there is no contention to serialize.
	 */
	if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
		TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
		    "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
		    devctx, dhp);
		do_timeout = 1;

		/* Wait for exclusive ownership of the device context. */
		mutex_enter(&devctx->lock);
		while (devctx->oncpu)
			cv_wait(&devctx->cv, &devctx->lock);
		devctx->oncpu = 1;
		mutex_exit(&devctx->lock);
	}

	/* Invoke the driver callback to switch contexts and load mappings. */
	ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);

	if (ret) {
		TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
		    "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
		    ret, dhp, devctx);
		DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
		    ret, (void *)dhp));

		/*
		 * Release ownership on failure.
		 * NOTE(review): oncpu is tested here without devctx->lock
		 * held; presumably safe because only the owner clears it —
		 * confirm against the locking protocol.
		 */
		if (devctx->oncpu) {
			mutex_enter(&devctx->lock);
			devctx->oncpu = 0;
			cv_signal(&devctx->cv);
			mutex_exit(&devctx->lock);
		}
		return (FC_HWERR);
	}

	if (do_timeout) {
		mutex_enter(&devctx->lock);
		if (dhp->dh_timeout_length > 0) {
			/* Keep ownership; the timeout releases it later. */
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
			    "devmap_do_ctxmgt:timeout set");
			devctx->timeout = timeout(devmap_ctxto,
			    devctx, dhp->dh_timeout_length);
		} else {
			/* Timeout was cleared meanwhile; release now. */
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
			    "devmap_do_ctxmgt:timeout not set");
			devctx->oncpu = 0;
			cv_signal(&devctx->cv);
		}
		mutex_exit(&devctx->lock);
	}

	return (DDI_SUCCESS);
}
/*
 * devmap_roundup: find the largest supported page size that can map the
 * area around (dh_pfn base + offset), constrained by VA/PA alignment and
 * the handle's bounds.  Fills *opfn with the pfn at the start of the
 * chosen large page and *pagesize with its size, and returns the number
 * of requested bytes left over beyond that page (0 if none).
 */
static ulong_t
devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
    ulong_t *opfn, ulong_t *pagesize)
{
	register int level;
	ulong_t pg;
	ulong_t poff;
	ulong_t base;
	caddr_t uvaddr;
	long rlen;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
	    "devmap_roundup:start dhp=%p off=%lx len=%lx",
	    (void *)dhp, offset, len);
	DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
	    (void *)dhp, offset, len));

	pg = 0;
	poff = 0;
	base = (ulong_t)ptob(dhp->dh_pfn);

	/*
	 * Walk down from the handle's maximum mmu level until a page size
	 * fits entirely within the handle and is VA/PA aligned.
	 */
	for (level = dhp->dh_mmulevel; level >= 0; level--) {
		pg = page_get_pagesize(level);
		poff = ((base + offset) & ~(pg - 1));
		uvaddr = dhp->dh_uvaddr + (poff - base);
		if ((poff >= base) &&
		    ((poff + pg) <= (base + dhp->dh_len)) &&
		    VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
			break;
	}

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
	    "devmap_roundup: base=%lx poff=%lx dhp=%p",
	    base, poff, dhp);
	DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
	    base, poff, dhp->dh_pfn));

	/* Level 0 (PAGESIZE) must always have satisfied the loop. */
	ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
	ASSERT(level >= 0);

	*pagesize = pg;
	*opfn = dhp->dh_pfn + btop(poff - base);

	/* Bytes of the request beyond the end of the chosen page. */
	rlen = len + offset - (poff - base + pg);

	ASSERT(rlen < (long)len);

	TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
	    "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
	    (void *)dhp, level, rlen, pagesize, opfn);
	DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
	    "level %x rlen %lx psize %lx opfn %lx\n",
	    (void *)dhp, level, rlen, *pagesize, *opfn));

	return ((ulong_t)((rlen > 0) ? rlen : 0));
}
/*
 * devmap_find_handle: walk the handle chain and return the handle whose
 * user address range contains addr, or NULL if none does.
 */
static devmap_handle_t *
devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
{
	devmap_handle_t *cur;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
	    "devmap_find_handle:start");

	for (cur = dhp_head; cur != NULL; cur = cur->dh_next) {
		if (addr >= cur->dh_uvaddr &&
		    addr < (cur->dh_uvaddr + cur->dh_len))
			return (cur);
	}
	return ((devmap_handle_t *)NULL);
}
/*
 * devmap_unload: remove the hat translations for [offset, offset + len)
 * within a devmap handle (len == 0 means "to the end of the handle").
 * Large-page handles are unloaded in full so no partial large-page
 * translation is left behind.  Returns 0 or an FC_* code.
 */
int
devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
{
	register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	caddr_t addr;
	ulong_t size;
	ssize_t soff;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
	    "devmap_unload:start dhp=%p offset=%llx len=%lx",
	    (void *)dhp, offset, len);
	DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
	    (void *)dhp, offset, len));

	/* Page-align the starting offset within the handle. */
	soff = round_down_p2((ssize_t)(offset - dhp->dh_uoff), PAGESIZE);
	if (soff < 0 || soff >= dhp->dh_len)
		return (FC_MAKE_ERR(EINVAL));

	/* Page-align the length; zero selects the handle remainder. */
	len = round_up_p2(len, PAGESIZE);
	if (len != 0) {
		size = len;
		if ((soff + size) > dhp->dh_len)
			return (FC_MAKE_ERR(EINVAL));
	} else {
		size = dhp->dh_len - soff;
	}

	addr = dhp->dh_uvaddr + soff;
	if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
		/* Large pages: drop the whole handle's translations. */
		hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
		    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
	} else {
		hat_unload(dhp->dh_seg->s_as->a_hat, addr, size,
		    HAT_UNLOAD|HAT_UNLOAD_OTHER);
	}
	return (0);
}
/*
 * devmap_get_large_pgsize: expand [addr, addr + len) to the smallest
 * span of whole large pages covering it, using devmap_roundup() to pick
 * each page.  On return *laddr/*llen describe the expanded span, which
 * always contains the original range.
 */
static void
devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
    size_t *llen, caddr_t *laddr)
{
	ulong_t off;
	ulong_t pfn;
	ulong_t pgsize;
	uint_t first = 1;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
	    "devmap_get_large_pgsize:start");

	/* Only valid device-memory handles carry large pages. */
	ASSERT(dhp_is_devmem(dhp));
	ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));

	*llen = 0;
	off = (ulong_t)(addr - dhp->dh_uvaddr);

	/* devmap_roundup returns the residual length past each page. */
	while ((long)len > 0) {
		len = devmap_roundup(dhp, off, len, &pfn, &pgsize);
		if (first) {
			/* First page fixes the expanded start address. */
			*laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
			first = 0;
		}
		*llen += pgsize;
		off = ptob(pfn - dhp->dh_pfn) + pgsize;
	}

	/* The expanded span must cover the original range. */
	ASSERT(*llen >= len && *laddr <= addr);
	ASSERT((*laddr + *llen) >= (addr + len));
}
/*
 * devmap_softlock_init: find or create the softlock structure keyed by
 * (dev, id) and take a reference on it.  The new structure is allocated
 * before taking devmap_slock because kmem_zalloc(KM_SLEEP) may block.
 */
static struct devmap_softlock *
devmap_softlock_init(dev_t dev, ulong_t id)
{
	struct devmap_softlock *slock;
	struct devmap_softlock *tmp;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
	    "devmap_softlock_init:start");

	/* Pre-allocate outside the list lock (allocation may sleep). */
	tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
	mutex_enter(&devmap_slock);

	for (slock = devmap_slist; slock != NULL; slock = slock->next)
		if ((slock->dev == dev) && (slock->id == id))
			break;

	if (slock == NULL) {
		/* First user of this (dev, id): insert the new entry. */
		slock = tmp;
		slock->dev = dev;
		slock->id = id;
		mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
		slock->next = devmap_slist;
		devmap_slist = slock;
	} else
		/* Entry already exists; discard the pre-allocation. */
		kmem_free(tmp, sizeof (struct devmap_softlock));

	mutex_enter(&slock->lock);
	slock->refcnt++;
	mutex_exit(&slock->lock);
	mutex_exit(&devmap_slock);

	return (slock);
}
/*
 * devmap_softlock_rele: drop a reference on the handle's softlock
 * structure; on the last release, unlink it from devmap_slist and
 * destroy it.
 */
static void
devmap_softlock_rele(devmap_handle_t *dhp)
{
	struct devmap_softlock *slock = dhp->dh_softlock;
	struct devmap_softlock *tmp;
	struct devmap_softlock *parent;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
	    "devmap_softlock_rele:start");

	mutex_enter(&devmap_slock);
	mutex_enter(&slock->lock);

	ASSERT(slock->refcnt > 0);

	slock->refcnt--;

	if (slock->refcnt == 0) {
		/* Last reference: wake waiters, unlink, and destroy. */
		slock->softlocked = 0;
		cv_signal(&slock->cv);

		if (devmap_slist == slock)
			devmap_slist = slock->next;
		else {
			parent = devmap_slist;
			for (tmp = devmap_slist->next; tmp != NULL;
			    tmp = tmp->next) {
				if (tmp == slock) {
					parent->next = tmp->next;
					break;
				}
				parent = tmp;
			}
		}
		mutex_exit(&slock->lock);
		mutex_destroy(&slock->lock);
		cv_destroy(&slock->cv);
		kmem_free(slock, sizeof (struct devmap_softlock));
	} else
		mutex_exit(&slock->lock);

	mutex_exit(&devmap_slock);
}
/*
 * devmap_ctx_rele: drop a reference on the handle's device-context
 * structure; on the last release, cancel any pending context timeout,
 * wake waiters, unlink the structure from devmapctx_list, and free it.
 */
static void
devmap_ctx_rele(devmap_handle_t *dhp)
{
	struct devmap_ctx *devctx = dhp->dh_ctx;
	struct devmap_ctx *tmp;
	struct devmap_ctx *parent;
	timeout_id_t tid;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
	    "devmap_ctx_rele:start");

	mutex_enter(&devmapctx_lock);
	mutex_enter(&devctx->lock);

	ASSERT(devctx->refcnt > 0);

	devctx->refcnt--;

	if (devctx->refcnt == 0) {
		if (devctx->timeout != 0) {
			TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
			    "devmap_ctx_rele:untimeout ctx->timeout");
			/*
			 * Drop devctx->lock around untimeout(): the timeout
			 * handler may itself need this lock, and untimeout
			 * waits for an in-flight handler to finish.
			 */
			tid = devctx->timeout;
			mutex_exit(&devctx->lock);
			(void) untimeout(tid);
			mutex_enter(&devctx->lock);
		}

		devctx->oncpu = 0;
		cv_signal(&devctx->cv);

		/* Unlink from the global context list. */
		if (devmapctx_list == devctx)
			devmapctx_list = devctx->next;
		else {
			parent = devmapctx_list;
			for (tmp = devmapctx_list->next; tmp != NULL;
			    tmp = tmp->next) {
				if (tmp == devctx) {
					parent->next = tmp->next;
					break;
				}
				parent = tmp;
			}
		}
		mutex_exit(&devctx->lock);
		mutex_destroy(&devctx->lock);
		cv_destroy(&devctx->cv);
		kmem_free(devctx, sizeof (struct devmap_ctx));
	} else
		mutex_exit(&devctx->lock);

	mutex_exit(&devmapctx_lock);
}
/*
 * devmap_load: driver-callable routine to load translations for
 * [offset, offset + len) within a devmap handle (len == 0 means "to the
 * end of the handle").  Validates and page-aligns the range, then calls
 * segdev_faultpages() under the handle's remap lock.
 * Returns 0 or an FC_* fault code.
 */
int
devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
    uint_t rw)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct as *asp = dhp->dh_seg->s_as;
	caddr_t	addr;
	ulong_t	size;
	ssize_t	soff;
	int	rc;

	TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
	    "devmap_load:start dhp=%p offset=%llx len=%lx",
	    (void *)dhp, offset, len);
	DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
	    (void *)dhp, offset, len));

	/* The caller must hold the address-space lock (except for kas). */
	if ((asp != &kas) && !AS_LOCK_HELD(asp))
		return (FC_MAKE_ERR(EINVAL));

	/* Page-align the starting offset within the handle. */
	soff = (ssize_t)(offset - dhp->dh_uoff);
	soff = round_down_p2(soff, PAGESIZE);
	if (soff < 0 || soff >= dhp->dh_len)
		return (FC_MAKE_ERR(EINVAL));

	/* Page-align the length; zero selects the handle remainder. */
	len = round_up_p2(len, PAGESIZE);
	if (len == 0)
		size = dhp->dh_len - soff;
	else {
		size = len;
		if ((soff + size) > dhp->dh_len)
			return (FC_MAKE_ERR(EINVAL));
	}

	addr = (caddr_t)(soff + dhp->dh_uvaddr);

	/* Hold the remap lock so the mapping cannot change underneath. */
	HOLD_DHP_LOCK(dhp);
	rc = segdev_faultpages(asp->a_hat,
	    dhp->dh_seg, addr, size, type, rw, dhp);
	RELE_DHP_LOCK(dhp);
	return (rc);
}
/*
 * devmap_setup: establish a mapping for a device that exports the
 * devmap(9E) entry point.  Repeatedly invokes the driver's devmap entry
 * to build a chain of devmap handles covering [off, off + len), creates
 * the segdev segment over the chain, then runs each handle's devmap_map
 * callback.  Returns 0 or an errno.
 */
int
devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	register devmap_handle_t *dhp;
	int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
	    size_t *, uint_t);
	int (*mmap)(dev_t, off_t, int);
	struct devmap_callback_ctl *callbackops;
	devmap_handle_t *dhp_head = NULL;
	devmap_handle_t *dhp_prev = NULL;
	devmap_handle_t *dhp_curr;
	caddr_t addr;
	int map_flag;
	int ret;
	ulong_t total_len;
	size_t map_len;
	size_t resid_len = len;
	offset_t map_off = off;
	struct devmap_softlock *slock = NULL;

#ifdef lint
	cred = cred;
#endif

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
	    "devmap_setup:start off=%llx len=%lx", off, len);
	DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
	    off, len));

	/* The driver must supply a real devmap(9E) entry point. */
	devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
	mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;
	if (devmap == NULL || devmap == nulldev || devmap == nodev)
		return (EINVAL);

	/* Without D_DEVMAP, a usable mmap(9E) entry is also required. */
	map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
	if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
		return (EINVAL);

	/*
	 * Build the handle chain: the driver fills one handle (map_len
	 * bytes) per call until the whole request is covered.
	 */
	for (total_len = 0; total_len < len; total_len += map_len) {
		dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);

		if (dhp_prev != NULL)
			dhp_prev->dh_next = dhp;
		else
			dhp_head = dhp;
		dhp_prev = dhp;

		dhp->dh_prot = prot;
		dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
		dhp->dh_dev = dev;
		dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
		dhp->dh_uoff = map_off;

		/* Ask the driver to set up this handle. */
		if ((ret = cdev_devmap(dev, dhp, map_off,
		    resid_len, &map_len, get_udatamodel())) != 0) {
			free_devmap_handle(dhp_head);
			return (ENXIO);
		}

		/* Drivers must map whole pages. */
		if (map_len & PAGEOFFSET) {
			free_devmap_handle(dhp_head);
			return (EINVAL);
		}

		callbackops = &dhp->dh_callbackops;

		if ((callbackops->devmap_access == NULL) ||
		    (callbackops->devmap_access == nulldev) ||
		    (callbackops->devmap_access == nodev)) {
			/* No access callback: MAP_PRIVATE is unsupported. */
			if ((flags & MAP_PRIVATE) != 0) {
				free_devmap_handle(dhp_head);
				return (EINVAL);
			}
		} else {
			/*
			 * Handles with an access callback share softlock
			 * and context state keyed by (dev, callback).  All
			 * handles of one mapping must share one softlock.
			 */
			dhp->dh_softlock = devmap_softlock_init(dev,
			    (ulong_t)callbackops->devmap_access);
			dhp->dh_ctx = devmap_ctxinit(dev,
			    (ulong_t)callbackops->devmap_access);
			if (slock == NULL)
				slock = dhp->dh_softlock;
			if (slock != dhp->dh_softlock) {
				free_devmap_handle(dhp_head);
				return (ENOTSUP);
			}
		}

		map_off += map_len;
		resid_len -= map_len;
	}

	/* Create the segdev segment over the whole handle chain. */
	if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
	    != 0) {
		free_devmap_handle(dhp_head);
		return (ret);
	}

	/*
	 * User addresses are now known: record them on each handle and
	 * run the per-handle devmap_map callbacks.
	 */
	dhp = dhp_head;
	map_off = off;
	addr = *addrp;
	while (dhp != NULL) {
		callbackops = &dhp->dh_callbackops;
		dhp->dh_uvaddr = addr;
		dhp_curr = dhp;
		if (callbackops->devmap_map != NULL) {
			ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
			    dev, flags, map_off,
			    dhp->dh_len, &dhp->dh_pvtp);
			if (ret != 0) {
				struct segdev_data *sdp;

				/*
				 * Unwind: call devmap_unmap for each handle
				 * whose devmap_map already succeeded, then
				 * destroy the chain.
				 */
				dhp = dhp_head;
				map_off = off;
				while (dhp != dhp_curr) {
					callbackops = &dhp->dh_callbackops;
					if (callbackops->devmap_unmap != NULL) {
						(*callbackops->devmap_unmap)(
						    dhp, dhp->dh_pvtp,
						    map_off, dhp->dh_len,
						    NULL, NULL, NULL, NULL);
					}
					map_off += dhp->dh_len;
					dhp = dhp->dh_next;
				}
				sdp = dhp_head->dh_seg->s_data;
				sdp->devmap_data = NULL;
				free_devmap_handle(dhp_head);
				return (ENXIO);
			}
		}
		map_off += dhp->dh_len;
		addr += dhp->dh_len;
		dhp = dhp->dh_next;
	}
	return (0);
}
/*
 * ddi_devmap_segmap: public segmap entry for devmap-capable drivers;
 * thin wrapper that forwards to devmap_setup() with widened types.
 */
int
ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
	    "devmap_segmap:start");
	return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
	    (size_t)len, prot, maxprot, flags, cred));
}
/*
 * devmap_devmem_large_page_setup: compute the handle's maximum mmu
 * page-size level (dh_mmulevel) for a device-memory handle and set or
 * clear DEVMAP_FLAG_LARGE accordingly.  A level qualifies only while the
 * handle is long enough and the user VA and physical base share the
 * required alignment.
 */
static void
devmap_devmem_large_page_setup(devmap_handle_t *dhp)
{
	ASSERT(dhp_is_devmem(dhp));

	dhp->dh_mmulevel = 0;

	/*
	 * Large pages are considered only when the system supports more
	 * than one page size and the driver has not forced PAGESIZE or
	 * declared the mapping invalid.
	 */
	if (page_num_pagesizes() > 1 &&
	    !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
		ulong_t base;
		int level;

		base = (ulong_t)ptob(dhp->dh_pfn);

		/* Find the first level that no longer fits/aligns. */
		for (level = 1; level < page_num_pagesizes(); level++) {
			size_t pgsize = page_get_pagesize(level);

			if ((dhp->dh_len < pgsize) ||
			    (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
			    base, pgsize))) {
				break;
			}
		}

		/* Highest level that still qualified. */
		dhp->dh_mmulevel = level - 1;
	}

	if (dhp->dh_mmulevel > 0) {
		dhp->dh_flags |= DEVMAP_FLAG_LARGE;
	} else {
		dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
	}
}
/*
 * devmap_devmem_setup: bind a devmap handle to device (register) memory
 * described by register set rnumber at offset roff; called by a driver
 * from its devmap(9E) entry point.  Maps the register set to obtain the
 * base pfn, records cookie/length/protections on the handle, and enables
 * large-page support when alignment permits.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
    struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
    size_t len, uint_t maxprot, uint_t flags,
    const ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int err;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
	    "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
	    (void *)dhp, roff, rnumber, (uint_t)len);
	DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
	    "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));

	/* A handle may only be set up once. */
	if (dhp->dh_flags & DEVMAP_SETUP_DONE)
		return (DDI_FAILURE);

	/* maxprot must still permit the protections granted at mmap time. */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	if (flags & DEVMAP_MAPPING_INVALID) {
		/*
		 * An intentionally-invalid mapping (to be filled in later
		 * by devmap_devmem_remap) requires remap permission.
		 */
		if (!(flags & DEVMAP_ALLOW_REMAP)) {
			return (DDI_FAILURE);
		}
		dhp->dh_pfn = PFN_INVALID;
	} else {
		handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
		if (handle == NULL)
			return (DDI_FAILURE);

		hp = impl_acc_hdl_get(handle);
		hp->ah_vers = VERS_ACCHDL;
		hp->ah_dip = dip;
		hp->ah_rnumber = rnumber;
		hp->ah_offset = roff;
		hp->ah_len = len;
		if (accattrp != NULL)
			hp->ah_acc = *accattrp;

		mr.map_op = DDI_MO_MAP_LOCKED;
		mr.map_type = DDI_MT_RNUMBER;
		mr.map_obj.rnumber = rnumber;
		mr.map_prot = maxprot & dhp->dh_orig_maxprot;
		mr.map_flags = DDI_MF_DEVICE_MAPPING;
		mr.map_handlep = hp;
		mr.map_vers = DDI_MAP_VERSION;

		/* Map the register set to learn the base pfn. */
		err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
		dhp->dh_hat_attr = hp->ah_hat_flags;
		impl_acc_hdl_free(handle);

		if (err)
			return (DDI_FAILURE);
	}

	/* Device memory must not be system memory pages. */
	ASSERT(!pf_is_memory(dhp->dh_pfn));

	dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
	dhp->dh_len = ptob(btopr(len));

	dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
	dhp->dh_roff = ptob(btop(roff));

	/* Decide whether large pages can back this handle. */
	devmap_devmem_large_page_setup(dhp);
	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);

	if (callbackops != NULL) {
		bcopy(callbackops, &dhp->dh_callbackops,
		    sizeof (struct devmap_callback_ctl));
	}

	/* Remappable handles need the per-handle lock. */
	if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
		mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
		dhp->dh_flags |= DEVMAP_LOCK_INITED;
	}

	dhp->dh_flags |= DEVMAP_SETUP_DONE;

	return (DDI_SUCCESS);
}
/*
 * devmap_devmem_remap: rebind an already-set-up, remappable devmap
 * handle to different device memory (or mark it invalid).  Unloads the
 * existing translations under the handle lock, remaps the register set
 * for the new pfn, and recomputes large-page eligibility.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
    uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
    uint_t flags, const ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	pfn_t pfn;
	uint_t hat_flags;
	int err;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
	    "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
	    (void *)dhp, roff, rnumber, (uint_t)len);
	DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
	    "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));

	/* Remap requires a completed setup with remap permission. */
	if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
	    (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
		return (DDI_FAILURE);

	/* Only DEVMAP_MAPPING_INVALID is accepted as a flag here. */
	if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
		return (DDI_FAILURE);

	/* maxprot must still permit the protections granted at mmap time. */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	/* Allocate the access handle before taking the handle lock. */
	if (!(flags & DEVMAP_MAPPING_INVALID)) {
		handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
		if (handle == NULL)
			return (DDI_FAILURE);
	}

	HOLD_DHP_LOCK(dhp);

	/* Drop all existing translations for this handle. */
	hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
	    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);

	if (flags & DEVMAP_MAPPING_INVALID) {
		dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
		dhp->dh_pfn = PFN_INVALID;
	} else {
		/* A valid remap clears any previous invalid state. */
		dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;

		hp = impl_acc_hdl_get(handle);
		hp->ah_vers = VERS_ACCHDL;
		hp->ah_dip = dip;
		hp->ah_rnumber = rnumber;
		hp->ah_offset = roff;
		hp->ah_len = len;
		if (accattrp != NULL)
			hp->ah_acc = *accattrp;

		mr.map_op = DDI_MO_MAP_LOCKED;
		mr.map_type = DDI_MT_RNUMBER;
		mr.map_obj.rnumber = rnumber;
		mr.map_prot = maxprot & dhp->dh_orig_maxprot;
		mr.map_flags = DDI_MF_DEVICE_MAPPING;
		mr.map_handlep = hp;
		mr.map_vers = DDI_MAP_VERSION;

		/* Map the new register set to learn the new base pfn. */
		err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
		hat_flags = hp->ah_hat_flags;
		impl_acc_hdl_free(handle);
		if (err) {
			RELE_DHP_LOCK(dhp);
			return (DDI_FAILURE);
		}

		dhp->dh_pfn = pfn;
		dhp->dh_hat_attr = hat_flags;
	}

	/* Recompute large-page eligibility for the new backing. */
	dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;

	dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
	dhp->dh_roff = ptob(btop(roff));

	devmap_devmem_large_page_setup(dhp);
	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);

	RELE_DHP_LOCK(dhp);
	return (DDI_SUCCESS);
}
/*
 * devmap_umem_setup: bind a devmap handle to kernel-allocated memory
 * described by a ddi_umem_cookie at offset off; called by a driver from
 * its devmap(9E) entry point.  Records cookie, offset, kernel address,
 * and hat attributes on the handle.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
    struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
    offset_t off, size_t len, uint_t maxprot, uint_t flags,
    const ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
	    "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
	    (void *)dhp, off, cookie, len);
	DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
	    "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));

	if (cookie == NULL)
		return (DDI_FAILURE);

	/* The requested range must lie within the cookie's allocation. */
	if ((off + len) > cp->size)
		return (DDI_FAILURE);

	/* The cache attributes in flags must be a supported combination. */
	if (i_ddi_check_cache_attr(flags) == B_FALSE)
		return (DDI_FAILURE);

	/* A handle may only be set up once. */
	if (dhp->dh_flags & DEVMAP_SETUP_DONE)
		return (DDI_FAILURE);

	/* maxprot must still permit the protections granted at mmap time. */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	if (flags & DEVMAP_MAPPING_INVALID) {
		/*
		 * An intentionally-invalid mapping (to be filled in later
		 * by devmap_umem_remap) requires remap permission.
		 */
		if (!(flags & DEVMAP_ALLOW_REMAP)) {
			return (DDI_FAILURE);
		}
	} else {
		dhp->dh_cookie = cookie;
		dhp->dh_roff = ptob(btop(off));
		dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
		/* Fold cache attributes and access attributes into hat attrs. */
		i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
		i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
	}

	dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
	dhp->dh_len = ptob(btopr(len));

	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);

	if (callbackops != NULL) {
		bcopy(callbackops, &dhp->dh_callbackops,
		    sizeof (struct devmap_callback_ctl));
	}

	/* Remappable handles need the per-handle lock. */
	if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
		mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
		dhp->dh_flags |= DEVMAP_LOCK_INITED;
	}

	dhp->dh_flags |= DEVMAP_SETUP_DONE;

	return (DDI_SUCCESS);
}
/*
 * devmap_umem_remap: rebind an already-set-up, remappable devmap handle
 * to a different umem cookie/offset.  Unloads existing translations
 * under the handle lock and records the new backing.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
    ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
    uint_t flags, const ddi_device_acc_attr_t *accattrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;

	TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
	    "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
	    (void *)dhp, off, cookie, len);
	DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
	    "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));

	/* Remap requires a completed setup with remap permission. */
	if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
	    (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
		return (DDI_FAILURE);

	/* No flags are supported for umem remap. */
	if (flags != 0)
		return (DDI_FAILURE);

	/* flags is known to be 0 here, so this check is trivially true. */
	if (i_ddi_check_cache_attr(flags) == B_FALSE)
		return (DDI_FAILURE);

	/* maxprot must still permit the protections granted at mmap time. */
	if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
		return (DDI_FAILURE);

	/* The requested range must lie within the cookie's allocation. */
	if ((off + len) > cp->size)
		return (DDI_FAILURE);

	HOLD_DHP_LOCK(dhp);

	/* Drop all existing translations for this handle. */
	hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
	    dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);

	dhp->dh_cookie = cookie;
	dhp->dh_roff = ptob(btop(off));
	dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;

	/* Fold cache attributes and access attributes into hat attrs. */
	i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
	i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);

	/* umem handles never use large pages. */
	dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;

	dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
	ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);

	RELE_DHP_LOCK(dhp);
	return (DDI_SUCCESS);
}
/*
 * Record, in clock ticks, how long the framework should keep this
 * handle's mapping context alive before tearing it down.
 */
void
devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
{
	devmap_handle_t *handle = (devmap_handle_t *)dhc;

	TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
	    "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
	    (void *)handle, ticks);

	handle->dh_timeout_length = ticks;
}
/*
 * Default devmap access entry point for drivers that supply no
 * devmap_access callback: just load the requested translations.
 */
int
devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, uint_t type, uint_t rw)
{
	(void) pvtp;	/* unused: default handler has no private state */

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
	    "devmap_default_access:start");

	return (devmap_load(dhp, off, len, type, rw));
}
/*
 * Backend allocator for the umem_np arena: allocate wired kernel
 * memory, marked non-relocatable and share-locked.
 */
static void *
devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
{
	int allocflags = vmflag | VM_NORELOC | SEGKMEM_SHARELOCKED;

	ASSERT(vmp != NULL);
	ASSERT(kvseg.s_base != NULL);

	return (segkmem_alloc(vmp, size, allocflags));
}
/*
 * Backend free routine for the umem_np arena: unload the kernel
 * translations, destroy the backing pages for [inaddr, inaddr+size),
 * release the page reservation, and return the VA range to the arena.
 */
static void
devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(vmp != NULL);
	ASSERT(kvseg.s_base != NULL);
	/* range must be page aligned */
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	/* remove all kernel translations for the range */
	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("devmap_free_pages: page not found");
		/*
		 * page_destroy requires the page exclusively locked;
		 * try to upgrade in place, otherwise drop the lock and
		 * look the page up again with SE_EXCL.
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
			if (pp == NULL)
				panic("devmap_free_pages: page already freed");
		}
		/* clear the lock count before destroying the page */
		pp->p_lckcnt = 0;
		page_destroy(pp, 0);
	}
	/* give back the page reservation taken at allocation time */
	page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}
/*
 * Allocate zeroed, non-pageable memory for ddi_umem_alloc from the
 * umem_np arena.  Returns NULL on failure (only possible with
 * DDI_UMEM_NOSLEEP).
 */
static void *
devmap_umem_alloc_np(size_t size, size_t flags)
{
	int vmflags;
	void *mem;

	vmflags = (flags & DDI_UMEM_NOSLEEP) ? VM_NOSLEEP : VM_SLEEP;
	mem = vmem_alloc(umem_np_arena, size, vmflags);
	if (mem == NULL)
		return (NULL);

	bzero(mem, size);
	return (mem);
}
/*
 * Return non-pageable umem memory to the umem_np arena.
 * 'nbytes' must match the size passed at allocation time.
 */
static void
devmap_umem_free_np(void *buf, size_t nbytes)
{
	vmem_free(umem_np_arena, buf, nbytes);
}
/*
 * ddi_umem_alloc: allocate page-aligned kernel memory for export to user
 * mappings and hand back a cookie describing it.
 *
 * flags selects the backing store:
 *   DDI_UMEM_PAGEABLE  - pageable memory from segkp (zeroed)
 *   DDI_UMEM_TRASH     - no backing memory; reads go to the trash page
 *   (neither)          - wired, non-pageable memory (zeroed)
 * DDI_UMEM_NOSLEEP makes the allocation non-blocking.
 *
 * On success returns the kernel virtual address and sets *cookie;
 * on failure returns NULL with *cookie left NULL.
 */
void *
ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
{
	size_t nbytes = ptob(btopr(size));	/* round up to whole pages */
	void *mem = NULL;
	struct ddi_umem_cookie *cp;
	int kpflags = 0;

	*cookie = NULL;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
	    "devmap_umem_alloc:start");

	if (nbytes == 0)
		return ((void *)NULL);

	cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
	    (flags & DDI_UMEM_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
	if (cp == NULL) {
		/* zalloc can only fail in the NOSLEEP case */
		ASSERT(flags & DDI_UMEM_NOSLEEP);
		return ((void *)NULL);
	}

	if (flags & DDI_UMEM_PAGEABLE) {
		/* PAGEABLE and TRASH are mutually exclusive */
		ASSERT(!(flags & DDI_UMEM_TRASH));

		kpflags = KPD_ZERO;
		if (flags & DDI_UMEM_NOSLEEP)
			kpflags |= KPD_NOWAIT;

		mem = segkp_get(segkp, nbytes, kpflags);
		if (mem == NULL) {
			kmem_free(cp, sizeof (struct ddi_umem_cookie));
			return ((void *)NULL);
		}
		cp->type = KMEM_PAGEABLE;
		/* lock/locked track in-flight faults on the cookie */
		mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
		cp->locked = 0;
	} else if (flags & DDI_UMEM_TRASH) {
		ASSERT(!(flags & DDI_UMEM_PAGEABLE));
		cp->type = UMEM_TRASH;
		mem = NULL;	/* trash cookies carry no kernel address */
	} else {
		mem = devmap_umem_alloc_np(nbytes, flags);
		if (mem == NULL) {
			kmem_free(cp, sizeof (struct ddi_umem_cookie));
			return ((void *)NULL);
		}
		cp->type = KMEM_NON_PAGEABLE;
	}

	cp->size = nbytes;
	cp->cvaddr = (caddr_t)mem;
	*cookie = (void *)cp;

	return (mem);
}
/*
 * ddi_umem_free: free memory allocated by ddi_umem_alloc, dispatching on
 * the cookie type recorded at allocation time.  Freeing a NULL cookie is
 * a no-op.  Panics if pageable memory is freed while faults are pending
 * or if the cookie type is unrecognized.
 */
void
ddi_umem_free(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie *cp;

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
	    "devmap_umem_free:start");

	/* freeing a NULL cookie is a no-op */
	if (cookie == NULL)
		return;

	cp = (struct ddi_umem_cookie *)cookie;

	switch (cp->type) {
	case KMEM_PAGEABLE :
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		/*
		 * Refuse to free pageable memory while faults on it are
		 * still being serviced (cp->locked != 0).
		 */
		mutex_enter(&cp->lock);
		if (cp->locked) {
			mutex_exit(&cp->lock);
			panic("ddi_umem_free for cookie with pending faults %p",
			    (void *)cp);
		}
		/*
		 * Fix: release the mutex before destroying it.  The
		 * previous code reached mutex_destroy() with the lock
		 * still held, which mutex_destroy(9F) forbids.
		 */
		mutex_exit(&cp->lock);
		segkp_release(segkp, cp->cvaddr);
		mutex_destroy(&cp->lock);
		break;
	case KMEM_NON_PAGEABLE :
		ASSERT(cp->cvaddr != NULL && cp->size != 0);
		devmap_umem_free_np(cp->cvaddr, cp->size);
		break;
	case UMEM_TRASH :
		/* trash cookies have no backing memory to release */
		break;
	case UMEM_LOCKED :
		/*
		 * Locked-down memory: hand off to ddi_umem_unlock and
		 * return without freeing the cookie here; the unlock
		 * path owns the cookie's storage.
		 */
		ddi_umem_unlock(cookie);
		return;
	default:
		panic("ddi_umem_free: illegal cookie type 0x%x\n",
		    cp->type);
	}

	kmem_free(cookie, sizeof (struct ddi_umem_cookie));
}
/*
 * Segment-driver getmemid entry point: produce a memory id for 'addr'
 * from the common vnode and the offset of the address within the segment.
 * Always succeeds (returns 0).
 */
static int
segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segdev_data *sdata = (struct segdev_data *)seg->s_data;
	uintptr_t segoff = (uintptr_t)(addr - seg->s_base);

	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
	    "segdev_getmemid:start");

	memidp->val[0] = (uintptr_t)VTOCVP(sdata->vp);
	memidp->val[1] = sdata->offset + segoff;
	return (0);
}
/*
 * Segment-driver getpolicy entry point: device segments carry no
 * lgroup memory-placement policy, so always return NULL.
 */
static lgrp_mem_policy_info_t *
segdev_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
/*
 * Segment-driver capable entry point: device segments advertise no
 * optional capabilities, so always return 0.
 */
static int
segdev_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
#define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
void
segdev_init(void)
{
struct seg kseg;
umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
devmap_alloc_pages, devmap_free_pages, heap_arena,
DEVMAP_UMEM_QUANTUM, VM_SLEEP);
kseg.s_as = &kas;
trashpp = page_create_va(&trashvp, 0, PAGESIZE,
PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
if (trashpp == NULL)
panic("segdev_init: failed to create trash page");
pagezero(trashpp, 0, PAGESIZE);
page_downgrade(trashpp);
}
/*
 * Copy 'len' bytes from device space ('devaddr') into kernel space
 * ('kaddr') for the mapping at user address 'uaddr' within 'seg'.
 * Delegates to e_ddi_copyfromdev on the segment's device node.
 */
int
segdev_copyfrom(struct seg *seg,
    caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
{
	struct segdev_data *sdata = (struct segdev_data *)seg->s_data;
	struct snode *sp = VTOS(VTOCVP(sdata->vp));
	off_t segoff = (off_t)(uaddr - seg->s_base);

	return (e_ddi_copyfromdev(sp->s_dip, segoff, devaddr, kaddr, len));
}
/*
 * Copy 'len' bytes from kernel space ('kaddr') into device space
 * ('devaddr') for the mapping at user address 'uaddr' within 'seg'.
 * Delegates to e_ddi_copytodev on the segment's device node.
 */
int
segdev_copyto(struct seg *seg,
    caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
{
	struct segdev_data *sdata = (struct segdev_data *)seg->s_data;
	struct snode *sp = VTOS(VTOCVP(sdata->vp));
	off_t segoff = (off_t)(uaddr - seg->s_base);

	return (e_ddi_copytodev(sp->s_dip, segoff, kaddr, devaddr, len));
}