#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/buf.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/thread.h>
#include <sys/dumphdr.h>
#include <sys/bitmap.h>
#include <sys/lgrp.h>
#include <vm/seg_kmem.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kpm.h>
#include <vm/seg_map.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/rm.h>
/*
 * Forward declarations for the segmap seg_ops routines and internal
 * helpers.  segmap_fault() and get_smap_kpm() are non-static because
 * they are also referenced from outside this file.
 */
static void	segmap_free(struct seg *seg);
faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
	size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
static int	segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
	uint_t prot);
static int	segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
static int	segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
	uint_t *protv);
static u_offset_t	segmap_getoffset(struct seg *seg, caddr_t addr);
static int	segmap_gettype(struct seg *seg, caddr_t addr);
static int	segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static void	segmap_dump(struct seg *seg);
static int	segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***ppp, enum lock_type type,
	enum seg_rw rw);
static void	segmap_badop(void);
static int	segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segmap_getpolicy(struct seg *seg,
	caddr_t addr);
static int	segmap_capable(struct seg *seg, segcapability_t capability);
static caddr_t	segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
	struct smap *, enum seg_rw);

struct smap *get_smap_kpm(caddr_t, page_t **);
/*
 * Cast segmap_badop() to the function pointer type expected by the
 * seg_ops slots that must never be invoked on a segmap segment.
 */
#define	SEGMAP_BADOP(t)	(t(*)())segmap_badop

/*
 * Segment operations vector for segmap.  Slot order follows
 * struct seg_ops (see <vm/seg.h>); slot comments below name the op
 * each entry fills — confirm against the seg.h in use.
 */
static struct seg_ops segmap_ops = {
	SEGMAP_BADOP(int),	/* dup */
	SEGMAP_BADOP(int),	/* unmap */
	segmap_free,
	segmap_fault,
	segmap_faulta,
	SEGMAP_BADOP(int),	/* setprot */
	segmap_checkprot,
	segmap_kluster,
	SEGMAP_BADOP(size_t),	/* swapout */
	SEGMAP_BADOP(int),	/* sync */
	SEGMAP_BADOP(size_t),	/* incore */
	SEGMAP_BADOP(int),	/* lockop */
	segmap_getprot,
	segmap_getoffset,
	segmap_gettype,
	segmap_getvp,
	SEGMAP_BADOP(int),	/* advise */
	segmap_dump,
	segmap_pagelock,
	SEGMAP_BADOP(int),	/* setpgsz */
	segmap_getmemid,
	segmap_getpolicy,
	segmap_capable,
	seg_inherit_notsup	/* inherit */
};
/*
 * Private segmap routines.
 */
static void	segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
	size_t len, enum seg_rw rw, struct smap *smp);
static void	segmap_smapadd(struct smap *smp);
static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
	u_offset_t off, int hashid);
static void	segmap_hashout(struct smap *smp);

/*
 * Statistics for segmap operations, exported as kstats.
 *
 * No explicit locking protects these counters; occasional lost
 * increments are tolerated.  The initializer order here must match
 * the smp_* member order of struct segmapcnt, since the whole struct
 * is exported as an array of kstat_named_t via segmapcnt_ptr below.
 */
struct segmapcnt segmapcnt = {
	{ "fault",		KSTAT_DATA_ULONG },
	{ "faulta",		KSTAT_DATA_ULONG },
	{ "getmap",		KSTAT_DATA_ULONG },
	{ "get_use",		KSTAT_DATA_ULONG },
	{ "get_reclaim",	KSTAT_DATA_ULONG },
	{ "get_reuse",		KSTAT_DATA_ULONG },
	{ "get_unused",		KSTAT_DATA_ULONG },
	{ "get_nofree",		KSTAT_DATA_ULONG },
	{ "rel_async",		KSTAT_DATA_ULONG },
	{ "rel_write",		KSTAT_DATA_ULONG },
	{ "rel_free",		KSTAT_DATA_ULONG },
	{ "rel_abort",		KSTAT_DATA_ULONG },
	{ "rel_dontneed",	KSTAT_DATA_ULONG },
	{ "release",		KSTAT_DATA_ULONG },
	{ "pagecreate",		KSTAT_DATA_ULONG },
	{ "free_notfree",	KSTAT_DATA_ULONG },
	{ "free_dirty",		KSTAT_DATA_ULONG },
	{ "free",		KSTAT_DATA_ULONG },
	{ "stolen",		KSTAT_DATA_ULONG },
	{ "get_nomtx",		KSTAT_DATA_ULONG }
};

kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);
/*
 * Return the number of MAXBSIZE map slots in the segment.
 */
#define	MAP_PAGES(seg)		((seg)->s_size >> MAXBSHIFT)

/*
 * Translate addr into the smap slot index within the segment.
 */
#define	MAP_PAGE(seg, addr)	(((addr) - (seg)->s_base) >> MAXBSHIFT)

/*
 * Translate addr in seg into a struct smap pointer.
 */
#define	GET_SMAP(seg, addr)	\
	&(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])

/*
 * Bit in the per-slot softlock bitmap (16 bit bitmap).
 */
#define	SMAP_BIT_MASK(bitindex)	(1 << ((bitindex) & 0xf))

/* Virtual-color / freelist configuration, set up in segmap_create(). */
static int smd_colormsk = 0;
static int smd_ncolor = 0;
static int smd_nfree = 0;
static int smd_freemsk = 0;

#ifdef DEBUG
/* Per-freelist allocation counters (DEBUG only). */
static int *colors_used;
#endif

static struct smap *smd_smap;		/* base of the smap slot array */
static struct smaphash *smd_hash;	/* (vp, off) -> smap hash table */
#ifdef SEGMAP_HASHSTATS
static unsigned int *smd_hash_len;	/* hash chain lengths */
#endif
static struct smfree *smd_free;		/* array of free lists */
static ulong_t smd_hashmsk = 0;		/* hash table size - 1 (power of 2) */

#define	SEGMAP_MAXCOLOR		2
#define	SEGMAP_CACHE_PAD	64

/*
 * Per-CPU hot state: free-list rotor per color, last smap used
 * (fast path for get_smap_kpm) and the frequently-updated counters.
 * Padded to SEGMAP_CACHE_PAD to keep CPUs off each other's lines.
 */
union segmap_cpu {
	struct {
		uint32_t	scpu_free_ndx[SEGMAP_MAXCOLOR];
		struct smap	*scpu_last_smap;
		ulong_t		scpu_getmap;
		ulong_t		scpu_release;
		ulong_t		scpu_get_reclaim;
		ulong_t		scpu_fault;
		ulong_t		scpu_pagecreate;
		ulong_t		scpu_get_reuse;
	} scpu;
	char	scpu_pad[SEGMAP_CACHE_PAD];
};
static union segmap_cpu *smd_cpu;

/*
 * Locks in seg_map:
 *	- per freelist queue mutexes (smq_mtx)
 *	- per hash chain mutexes (sh_mtx)
 *	- per smap slot mutexes (sm_mtx)
 *
 * The ordering is: take the smap mutex to pin the slot first, then
 * the hash chain mutex (for hash in/out) or the freelist queue mutex
 * (to return the slot to a free list).
 */
#define	SHASHMTX(hashid)	(&smd_hash[hashid].sh_mtx)

#define	SMP2SMF(smp)		(&smd_free[(smp - smd_smap) & smd_freemsk])
#define	SMP2SMF_NDX(smp)	(ushort_t)((smp - smd_smap) & smd_freemsk)

#define	SMAPMTX(smp)		(&smp->sm_mtx)

/*
 * Hash (vp, off) into a chain index; assigns to the hashid argument.
 */
#define	SMAP_HASHFUNC(vp, off, hashid) \
	{ \
	hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
	    ((off) >> MAXBSHIFT)) & smd_hashmsk); \
	}
/*
 * Kstat update routine for the segmap counters: fold the per-CPU
 * counters into the global segmapcnt structure on every read.
 * Writing to these kstats is refused with EACCES.
 */
int
segmap_kstat_update(kstat_t *ksp, int rw)
{
	ulong_t sum_getmap = 0, sum_release = 0, sum_reclaim = 0;
	ulong_t sum_fault = 0, sum_pagecreate = 0, sum_reuse = 0;
	int cpu;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	for (cpu = 0; cpu < max_ncpus; cpu++) {
		union segmap_cpu *sc = &smd_cpu[cpu];

		sum_getmap += sc->scpu.scpu_getmap;
		sum_release += sc->scpu.scpu_release;
		sum_reclaim += sc->scpu.scpu_get_reclaim;
		sum_fault += sc->scpu.scpu_fault;
		sum_pagecreate += sc->scpu.scpu_pagecreate;
		sum_reuse += sc->scpu.scpu_get_reuse;
	}

	segmapcnt.smp_getmap.value.ul = sum_getmap;
	segmapcnt.smp_release.value.ul = sum_release;
	segmapcnt.smp_get_reclaim.value.ul = sum_reclaim;
	segmapcnt.smp_fault.value.ul = sum_fault;
	segmapcnt.smp_pagecreate.value.ul = sum_pagecreate;
	segmapcnt.smp_get_reuse.value.ul = sum_reuse;
	return (0);
}
/*
 * Create a (virtual) segmap segment.  Called at boot to set up segkmap.
 * Panics if the segment base/size is not MAXBSIZE aligned.
 *
 * Sets up:
 *  - smd_free:  smd_ncolor * nfreelist free lists, each with an
 *    alloc/release queue pair;
 *  - smd_hash:  the (vp, off) -> smap hash table;
 *  - smd_sm:    one smap slot per MAXBSIZE window of the segment,
 *    all initially placed on the release queues;
 *  - smd_cpu:   per-CPU counters and free-list rotors.
 */
int
segmap_create(struct seg *seg, void *argsp)
{
	struct segmap_data *smd;
	struct smap *smp;
	struct smfree *sm;
	struct segmap_crargs *a = (struct segmap_crargs *)argsp;
	struct smaphash *shashp;
	union segmap_cpu *scpu;
	long i, npages;
	size_t hashsz;
	uint_t nfreelist;
	extern void prefetch_smap_w(void *);
	extern int max_ncpus;

	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));

	if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
		panic("segkmap not MAXBSIZE aligned");
	}

	smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);

	seg->s_data = (void *)smd;
	seg->s_ops = &segmap_ops;
	smd->smd_prot = a->prot;

	/*
	 * Scale the number of smap freelists to be
	 * proportional to max_ncpus * number of virtual colors.
	 * The caller can over-ride this degree of parallelism,
	 * but not beyond 4 * max_ncpus.
	 */
	nfreelist = a->nfreelist;
	if (nfreelist == 0)
		nfreelist = max_ncpus;
	else if (nfreelist > 4 * max_ncpus) {
		cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
		    "%d, using %d", nfreelist, max_ncpus);
		nfreelist = max_ncpus;
	}
	if (!ISP2(nfreelist)) {
		/* round up nfreelist to the next power of two */
		nfreelist = 1 << (highbit(nfreelist));
	}

	/*
	 * Get the number of virtual colors - must be a power of 2.
	 */
	if (a->shmsize)
		smd_ncolor = a->shmsize >> MAXBSHIFT;
	else
		smd_ncolor = 1;
	ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
	ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
	smd_colormsk = smd_ncolor - 1;
	smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
	smd_freemsk = smd_nfree - 1;

	/*
	 * Allocate and initialize the freelist headers.
	 * Note that sm_freeq[1] starts out as the release queue;
	 * this is relied upon when the smap slots are linked below.
	 */
	smd_free = smd->smd_free =
	    kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
	for (i = 0; i < smd_nfree; i++) {
		sm = &smd->smd_free[i];
		mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
		sm->sm_allocq = &sm->sm_freeq[0];
		sm->sm_releq = &sm->sm_freeq[1];
	}

	/*
	 * Allocate and initialize the smap hash chain headers.
	 * The hash size is rounded down to a power of two.
	 */
	npages = MAP_PAGES(seg);
	smd->smd_npages = npages;
	hashsz = npages / SMAP_HASHAVELEN;
	hashsz = 1 << (highbit(hashsz)-1);
	smd_hashmsk = hashsz - 1;
	smd_hash = smd->smd_hash =
	    kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
#ifdef SEGMAP_HASHSTATS
	smd_hash_len =
	    kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
#endif
	for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
		shashp->sh_hash_list = NULL;
		mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
	}

	/*
	 * Allocate and initialize the smap structures and link all
	 * slots onto the appropriate freelist.  The array can be
	 * large, so prefetch the slots and walk the array only once,
	 * with an inlined, lock-free version of segmap_smapadd (no
	 * locks are needed at this point in boot).
	 */
	smd_smap = smd->smd_sm =
	    kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);

	for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
	    smp >= smd->smd_sm; smp--) {
		struct smap *smpfreelist;
		struct sm_freeq *releq;

		prefetch_smap_w((char *)smp);

		smp->sm_vp = NULL;
		smp->sm_hash = NULL;
		smp->sm_off = 0;
		smp->sm_bitmap = 0;
		smp->sm_refcnt = 0;
		mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
		smp->sm_free_ndx = SMP2SMF_NDX(smp);

		sm = SMP2SMF(smp);
		releq = sm->sm_releq;

		smpfreelist = releq->smq_free;
		if (smpfreelist == 0) {
			releq->smq_free = smp->sm_next = smp->sm_prev = smp;
		} else {
			smp->sm_next = smpfreelist;
			smp->sm_prev = smpfreelist->sm_prev;
			smpfreelist->sm_prev = smp;
			smp->sm_prev->sm_next = smp;
			releq->smq_free = smp->sm_next;
		}

		/*
		 * sm_flags = 0 (no SM_QNDX_ZERO) implies the smap is
		 * on sm_freeq[1], the release queue.
		 */
		smp->sm_flags = 0;

#ifdef	SEGKPM_SUPPORT
		/*
		 * Initialized inline (no separate helper) to keep the
		 * prefetch loop intact.
		 */
		smp->sm_kpme_next = NULL;
		smp->sm_kpme_prev = NULL;
		smp->sm_kpme_page = NULL;
#endif
	}

	/*
	 * Set up the per-CPU rotor indices that spread allocations
	 * across the free lists of each color.  scpu_last_smap is
	 * pointed at the first smap so it is never NULL.
	 */
	smd_cpu =
	    kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
	for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
		int j;
		for (j = 0; j < smd_ncolor; j++)
			scpu->scpu.scpu_free_ndx[j] = j;
		scpu->scpu.scpu_last_smap = smd_smap;
	}

	vpm_init();

#ifdef DEBUG
	/*
	 * Keep track of which colors are used more often.
	 */
	colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
#endif /* DEBUG */

	return (0);
}
/*
 * Free a segmap segment.  segkmap is never unmapped, so there is
 * nothing to tear down here; just assert that the caller holds the
 * address space lock as writer, as required for seg_ops free.
 *
 * Converted from a K&R old-style definition to the ANSI prototype
 * form, matching the forward declaration of this function.
 */
static void
segmap_free(struct seg *seg)
{
	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
}
/*
 * Undo a previous F_SOFTLOCK segmap_fault over [addr, addr + len):
 * unlock the hat translations, transfer the access type (rw) to the
 * page ref/mod bits, clear the per-page softlock bits in the smap
 * bitmap and drop the page locks taken at fault time.
 */
static void
segmap_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t addr,
	size_t len,
	enum seg_rw rw,
	struct smap *smp)
{
	page_t *pp;
	caddr_t adr;
	u_offset_t off;
	struct vnode *vp;
	kmutex_t *smtx;

	ASSERT(smp->sm_refcnt > 0);

#ifdef lint
	seg = seg;
#endif

	if (segmap_kpm && IS_KPM_ADDR(addr)) {

		/*
		 * We're called only from segmap_fault, where this is a
		 * no-op for a kpm-based smap: kpm pages are prefaulted
		 * and locked in segmap_getmapflt and stay locked until
		 * segmap_release.  Reaching here means something went
		 * badly wrong.
		 */
		panic("segmap_unlock: called with kpm addr %p", (void *)addr);
	}

	vp = smp->sm_vp;
	off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);

	hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
	for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
		ushort_t bitmask;

		/*
		 * Use page_find() instead of page_lookup() to
		 * find the page since we know that it is locked.
		 */
		pp = page_find(vp, off);
		if (pp == NULL) {
			panic("segmap_unlock: page not found");
		}

		if (rw == S_WRITE) {
			hat_setrefmod(pp);
		} else if (rw != S_OTHER) {
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			    "segmap_fault:pp %p vp %p offset %llx", pp, vp, off);
			hat_setref(pp);
		}

		/*
		 * Clear the softlock bit for "off", if set, since the
		 * page and translation are being unlocked.
		 */
		bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);

		/*
		 * Large Files: the offset delta within the window must
		 * fit in an int for SMAP_BIT_MASK to be meaningful.
		 */
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		if (smp->sm_bitmap & bitmask) {
			smp->sm_bitmap &= ~bitmask;
		}
		mutex_exit(smtx);

		page_unlock(pp);
	}
}
/* Largest page list ever needed; assumes a minimum page size of 4k. */
#define	MAXPPB	(MAXBSIZE/4096)

/*
 * Handle a fault on an address within one of the smap windows.
 *
 * F_SOFTUNLOCK undoes a previous F_SOFTLOCK via segmap_unlock().
 * Other fault types obtain the pages from the vnode with VOP_GETPAGE
 * and load translations to them (locked for F_SOFTLOCK).
 *
 * For segkpm addresses only F_SOFTUNLOCK is expected: kpm pages are
 * prefaulted and locked in segmap_getmapflt and cannot be unlocked
 * until segmap_release.  Non-softunlock kpm requests return 0 early
 * (with a warning in DEBUG kernels if a new page is pending).
 */
faultcode_t
segmap_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t addr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	struct smap *smp;
	page_t *pp, **ppp;
	struct vnode *vp;
	u_offset_t off;
	page_t *pl[MAXPPB + 1];
	uint_t prot;
	u_offset_t addroff;
	caddr_t adr;
	int err;
	u_offset_t sm_off;
	int hat_flag;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		int newpage;
		kmutex_t *smtx;

		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release.  No hat mappings have to be locked
		 * and they also can't be unlocked as long as the
		 * caller owns an active kpm addr.
		 */
#ifndef DEBUG
		if (type != F_SOFTUNLOCK)
			return (0);
#endif

		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_fault: smap not found "
			    "for addr %p", (void *)addr);
		}

		smtx = SMAPMTX(smp);
#ifdef	DEBUG
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		if (newpage) {
			cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
			    (void *)smp);
		}

		if (type != F_SOFTUNLOCK) {
			mutex_exit(smtx);
			return (0);
		}
#endif
		mutex_exit(smtx);
		vp = smp->sm_vp;
		sm_off = smp->sm_off;

		if (vp == NULL)
			return (FC_MAKE_ERR(EIO));
		ASSERT(smp->sm_refcnt > 0);

		addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
		if (addroff + len > MAXBSIZE)
			panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
			    (void *)(addr + len));

		off = sm_off + addroff;

		/* the page is locked, so page_find() suffices */
		pp = page_find(vp, off);

		if (pp == NULL)
			panic("segmap_fault: softunlock page not found");

		/*
		 * Set the ref bit also for S_OTHER, to avoid having to
		 * support further cases with segkpm; this is safe since
		 * the underlying pages are locked anyway.
		 */
		if (rw == S_WRITE) {
			hat_setrefmod(pp);
		} else {
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			    "segmap_fault:pp %p vp %p offset %llx",
			    pp, vp, off);
			hat_setref(pp);
		}

		return (0);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
	smp = GET_SMAP(seg, addr);
	vp = smp->sm_vp;
	sm_off = smp->sm_off;

	if (vp == NULL)
		return (FC_MAKE_ERR(EIO));

	ASSERT(smp->sm_refcnt > 0);

	addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
	if (addroff + len > MAXBSIZE) {
		panic("segmap_fault: endaddr %p "
		    "exceeds MAXBSIZE chunk", (void *)(addr + len));
	}
	off = sm_off + addroff;

	/*
	 * First handle the easy stuff.
	 */
	if (type == F_SOFTUNLOCK) {
		segmap_unlock(hat, seg, addr, len, rw, smp);
		return (0);
	}

	TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
	    "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
	err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
	    seg, addr, rw, CRED(), NULL);

	if (err)
		return (FC_MAKE_ERR(err));

	prot &= smd->smd_prot;

	/*
	 * Handle all pages returned in the pl[] array.
	 * This loop assumes that, absent an error from VOP_GETPAGE,
	 * the returned page list contains all the pages needed for
	 * the vp range [off..off + len].
	 */
	ppp = pl;
	while ((pp = *ppp++) != NULL) {
		u_offset_t poff;
		ASSERT(pp->p_vnode == vp);
		hat_flag = HAT_LOAD;

		/*
		 * Verify that the page is within the range of this
		 * segmap window.  Pages outside the range can be
		 * returned (though it is unlikely); if a page cannot
		 * be used here, just release it and go on.
		 */
		if (pp->p_offset < sm_off ||
		    pp->p_offset >= sm_off + MAXBSIZE) {
			(void) page_release(pp, 1);
			continue;
		}

		ASSERT(hat == kas.a_hat);
		poff = pp->p_offset;
		adr = addr + (poff - off);
		if (adr >= addr && adr < addr + len) {
			hat_setref(pp);
			TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
			    "segmap_fault:pp %p vp %p offset %llx",
			    pp, vp, poff);
			if (type == F_SOFTLOCK)
				hat_flag = HAT_LOAD_LOCK;
		}

		/*
		 * Deal with VMODSORT pages here.  If we know this is
		 * a write, do the setmod now and allow write access;
		 * otherwise (unless S_OTHER) remove write permission
		 * from unmodified pages so the first store faults and
		 * the mod bit is tracked.  With S_OTHER it's up to
		 * the FS to deal with this.
		 */
		if (IS_VMODSORT(vp)) {
			if (rw == S_WRITE)
				hat_setmod(pp);
			else if (rw != S_OTHER && !hat_ismod(pp))
				prot &= ~PROT_WRITE;
		}

		hat_memload(hat, adr, pp, prot, hat_flag);
		if (hat_flag != HAT_LOAD_LOCK)
			page_unlock(pp);
	}
	return (0);
}
/*
 * Asynchronous fault: start read-ahead I/O for the page containing
 * addr by calling VOP_GETPAGE with a NULL page list.  For kpm
 * addresses this is a no-op (pages are prefaulted and locked in
 * segmap_getmapflt); DEBUG kernels sanity-check the smap.
 */
static faultcode_t
segmap_faulta(struct seg *seg, caddr_t addr)
{
	struct smap *smp;
	struct vnode *vp;
	u_offset_t off;
	int err;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		int newpage;
		kmutex_t *smtx;

		/*
		 * Pages are successfully prefaulted and locked in
		 * segmap_getmapflt and can't be unlocked until
		 * segmap_release.
		 */
#ifdef	DEBUG
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_faulta: smap not found "
			    "for addr %p", (void *)addr);
		}

		smtx = SMAPMTX(smp);
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		mutex_exit(smtx);
		if (newpage)
			cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
			    (void *)smp);
#endif
		return (0);
	}

	segmapcnt.smp_faulta.value.ul++;
	smp = GET_SMAP(seg, addr);

	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off;

	if (vp == NULL) {
		cmn_err(CE_WARN, "segmap_faulta - no vp");
		return (FC_MAKE_ERR(EIO));
	}

	TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
	    "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);

	/* NULL page list / NULL protp: initiate the I/O only */
	err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
	    & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
	    seg, addr, S_READ, CRED(), NULL);

	if (err)
		return (FC_MAKE_ERR(err));
	return (0);
}
/*
 * Check that the segment's protections cover every bit the caller
 * requests.  Returns 0 when allowed, EACCES otherwise.
 */
static int
segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));

	if ((smd->smd_prot & prot) == prot)
		return (0);
	return (EACCES);
}
/*
 * Fill protv[] with the segment protections for every page spanned by
 * [addr, addr + len].  All segmap pages share the same protections.
 */
static int
segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	size_t npgs = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
	size_t pg;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	for (pg = 0; pg < npgs; pg++)
		protv[pg] = smd->smd_prot;
	return (0);
}
/*
 * Return the vnode offset corresponding to addr, computed from the
 * offset of the first smap slot plus the distance into the segment.
 */
static u_offset_t
segmap_getoffset(struct seg *seg, caddr_t addr)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	u_offset_t delta = (u_offset_t)(addr - seg->s_base);

	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	return (smd->smd_sm->sm_off + delta);
}
/*
 * Return the mapping type: segmap windows are always shared mappings
 * of the underlying vnode pages.
 */
static int
segmap_gettype(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	return (MAP_SHARED);
}
/*
 * Return (via *vpp) the vnode associated with the first smap slot.
 */
static int
segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	struct segmap_data *smd;

	ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));

	smd = (struct segmap_data *)seg->s_data;
	*vpp = smd->smd_sm->sm_vp;
	return (0);
}
/*
 * Klustering check: segmap never objects, so always report that the
 * delta page may be klustered with the faulting page.
 */
static int
segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}
/*
 * Catch-all for seg_ops entry points that must never be called on a
 * segmap segment (installed via SEGMAP_BADOP above); panics if hit.
 *
 * Definition updated to the ANSI `(void)` parameter list, matching
 * the forward declaration; the old empty parentheses declared an
 * unspecified parameter list.
 */
static void
segmap_badop(void)
{
	panic("segmap_badop");
}
/*
 * Return an unreferenced smap to its free list, on the tail of the
 * list's current release queue.  The caller holds smp's mutex; panics
 * if the slot is still referenced.  If the queue was empty and a
 * thread is waiting in get_free_smp(), signal it.
 */
static void
segmap_smapadd(struct smap *smp)
{
	struct smfree *sm;
	struct smap *smpfreelist;
	struct sm_freeq *releq;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	if (smp->sm_refcnt != 0) {
		panic("segmap_smapadd");
	}

	sm = &smd_free[smp->sm_free_ndx];
	/*
	 * Add to the tail of the release queue.  Note that sm_releq
	 * and sm_allocq could toggle before we get the lock; this
	 * does not affect correctness as the two queues exist only
	 * to reduce lock pressure.
	 */
	releq = sm->sm_releq;
	/* remember which queue the slot went on */
	if (releq == &sm->sm_freeq[0])
		smp->sm_flags |= SM_QNDX_ZERO;
	else
		smp->sm_flags &= ~SM_QNDX_ZERO;
	mutex_enter(&releq->smq_mtx);
	smpfreelist = releq->smq_free;
	if (smpfreelist == 0) {
		int want;

		releq->smq_free = smp->sm_next = smp->sm_prev = smp;
		/*
		 * Snapshot sm_want before dropping the releq mutex,
		 * then recheck it under the sm_freeq[0] mutex (where
		 * waiters sleep) before signaling, since another
		 * thread may already have done the wakeup.
		 */
		want = sm->sm_want;
		mutex_exit(&releq->smq_mtx);
		if (want) {
			mutex_enter(&sm->sm_freeq[0].smq_mtx);
			if (sm->sm_want)
				cv_signal(&sm->sm_free_cv);
			mutex_exit(&sm->sm_freeq[0].smq_mtx);
		}
	} else {
		/* insert at the tail of the circular list */
		smp->sm_next = smpfreelist;
		smp->sm_prev = smpfreelist->sm_prev;
		smpfreelist->sm_prev = smp;
		smp->sm_prev->sm_next = smp;
		mutex_exit(&releq->smq_mtx);
	}
}
/*
 * Insert smp, naming it (vp, off), onto the hash chain for hashid.
 * The caller holds smp's mutex; smp must currently be unnamed and
 * unlinked.
 *
 * Returns NULL when smp was inserted, or the existing smap for the
 * same (vp, off) when another thread beat us to it (smp is then left
 * untouched and the caller must dispose of it).
 */
static struct smap *
segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
{
	struct smap **hpp;
	struct smap *tmp;
	kmutex_t *hmtx;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));
	ASSERT(smp->sm_vp == NULL);
	ASSERT(smp->sm_hash == NULL);
	ASSERT(smp->sm_prev == NULL);
	ASSERT(smp->sm_next == NULL);
	ASSERT(hashid >= 0 && hashid <= smd_hashmsk);

	hmtx = SHASHMTX(hashid);

	mutex_enter(hmtx);
	/*
	 * First verify that no one has created an smp with (vp, off)
	 * as its tag before us.
	 */
	for (tmp = smd_hash[hashid].sh_hash_list;
	    tmp != NULL; tmp = tmp->sm_hash)
		if (tmp->sm_vp == vp && tmp->sm_off == off)
			break;

	if (tmp == NULL) {
		/*
		 * No one created one yet.  Note that the vnode hold
		 * count is deliberately NOT incremented: this is a
		 * "soft" reference, and the fact that a seg_map entry
		 * names a vnode must not keep the vnode alive.  Any
		 * path actively using the mapping already holds a
		 * non-zero vnode reference, and once the pages are
		 * gone it is harmless if the same vnode address is
		 * later reused for a different object.
		 */
		smp->sm_vp = vp;
		smp->sm_off = off;

		hpp = &smd_hash[hashid].sh_hash_list;
		smp->sm_hash = *hpp;
		*hpp = smp;
#ifdef SEGMAP_HASHSTATS
		smd_hash_len[hashid]++;
#endif
	}
	mutex_exit(hmtx);

	return (tmp);
}
/*
 * Remove smp from its (vp, off) hash chain and clear its identity.
 * The caller holds smp's mutex; panics if the slot is not found on
 * the chain it hashes to.
 */
static void
segmap_hashout(struct smap *smp)
{
	struct smap **hpp, *hp;
	struct vnode *vp;
	kmutex_t *mtx;
	int hashid;
	u_offset_t off;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	vp = smp->sm_vp;
	off = smp->sm_off;

	SMAP_HASHFUNC(vp, off, hashid);	/* macro assigns hashid */
	mtx = SHASHMTX(hashid);
	mutex_enter(mtx);

	/* walk the chain to the link that points at smp */
	hpp = &smd_hash[hashid].sh_hash_list;
	for (;;) {
		hp = *hpp;
		if (hp == NULL) {
			panic("segmap_hashout");
		}
		if (hp == smp)
			break;
		hpp = &hp->sm_hash;
	}

	*hpp = smp->sm_hash;
	smp->sm_hash = NULL;
#ifdef SEGMAP_HASHSTATS
	smd_hash_len[hashid]--;
#endif
	mutex_exit(mtx);

	smp->sm_vp = NULL;
	smp->sm_off = (u_offset_t)0;
}
/*
 * Attempt to free every page cached for the MAXBSIZE window of vp
 * starting at off.  Pages that cannot be exclusively locked without
 * waiting are simply skipped; the outcome of each page_release() is
 * tallied in the segmapcnt kstats.
 */
void
segmap_pagefree(struct vnode *vp, u_offset_t off)
{
	u_offset_t poff;
	u_offset_t eoff = off + MAXBSIZE;
	page_t *pp;

	for (poff = off; poff < eoff; poff += PAGESIZE) {
		pp = page_lookup_nowait(vp, poff, SE_EXCL);
		if (pp == NULL)
			continue;

		switch (page_release(pp, 1)) {
		case PGREL_NOTREL:
			segmapcnt.smp_free_notfree.value.ul++;
			break;
		case PGREL_MOD:
			segmapcnt.smp_free_dirty.value.ul++;
			break;
		case PGREL_CLEAN:
			segmapcnt.smp_free.value.ul++;
			break;
		}
	}
}
/*
 * Prepare a reclaimed smap slot for reuse.  The caller holds smp's
 * mutex and smp has refcnt == 0.  If the slot still names a
 * (vp, off) window: remove it from the hash, tear down the kpm
 * mapping (pp is the locked kpm page, or NULL) and/or the segmap hat
 * translations for the window, and free the cached pages.
 */
static void
grab_smp(struct smap *smp, page_t *pp)
{
	ASSERT(MUTEX_HELD(SMAPMTX(smp)));
	ASSERT(smp->sm_refcnt == 0);

	if (smp->sm_vp != (struct vnode *)NULL) {
		struct vnode *vp = smp->sm_vp;
		u_offset_t off = smp->sm_off;
		/*
		 * Destroy the old vnode association and unload any
		 * hardware translations to the old object.
		 */
		smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
		segmap_hashout(smp);

		/*
		 * The slot is off both the freelist and the hash list,
		 * so there is no reason to drop/reacquire sm_mtx
		 * across the hat_unload calls.
		 */
		if (segmap_kpm) {
			caddr_t vaddr;
			int hat_unload_needed = 0;

			/*
			 * Unload the kpm mapping, if any.
			 */
			if (pp != NULL) {
				vaddr = hat_kpm_page2va(pp, 1);
				hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
				page_unlock(pp);
			}

			/*
			 * Check for the rare case of an additional
			 * non-kpm mapping of this window.
			 */
			if (smp->sm_flags & SM_NOTKPM_RELEASED) {
				hat_unload_needed = 1;
				smp->sm_flags &= ~SM_NOTKPM_RELEASED;
			}

			if (hat_unload_needed) {
				hat_unload(kas.a_hat, segkmap->s_base +
				    ((smp - smd_smap) * MAXBSIZE),
				    MAXBSIZE, HAT_UNLOAD);
			}
		} else {
			ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
			smp->sm_flags &= ~SM_NOTKPM_RELEASED;
			hat_unload(kas.a_hat, segkmap->s_base +
			    ((smp - smd_smap) * MAXBSIZE),
			    MAXBSIZE, HAT_UNLOAD);
		}
		segmap_pagefree(vp, off);
	}
}
/*
 * Allocate a smap slot from free list free_ndx.
 *
 * Each free list has an alloc queue and a release queue; slots are
 * taken from the alloc queue and segmap_smapadd() returns them to
 * the release queue.  When the alloc queue is empty the two queues
 * are swapped, so allocation rarely contends with releases.  If
 * every list of this color group is empty, sleep on sm_free_cv until
 * a slot is released.
 *
 * Returns the slot (refcnt == 0, identity already torn down via
 * grab_smp) with its smap mutex held.
 */
static struct smap *
get_free_smp(int free_ndx)
{
	struct smfree *sm;
	kmutex_t *smtx;
	struct smap *smp, *first;
	struct sm_freeq *allocq, *releq;
	struct kpme *kpme;
	page_t *pp = NULL;
	int end_ndx, page_locked = 0;

	end_ndx = free_ndx;
	sm = &smd_free[free_ndx];

retry_queue:
	allocq = sm->sm_allocq;
	mutex_enter(&allocq->smq_mtx);

	if ((smp = allocq->smq_free) == NULL) {

skip_queue:
		/*
		 * The alloc list is empty or this queue is being
		 * skipped; first check whether the alloc queue toggled
		 * while we were acquiring its mutex.
		 */
		if (sm->sm_allocq != allocq) {
			/* queue changed */
			mutex_exit(&allocq->smq_mtx);
			goto retry_queue;
		}
		releq = sm->sm_releq;
		if (!mutex_tryenter(&releq->smq_mtx)) {
			/* cannot get releq; a free smp may be there now */
			mutex_exit(&allocq->smq_mtx);

			/*
			 * This loop could spin forever if this thread
			 * has higher priority than the thread holding
			 * releq->smq_mtx.  Force the other thread to
			 * run by blocking on the mutex, which is safe
			 * now that the allocq mutex is dropped.
			 */
			mutex_enter(&releq->smq_mtx);
			mutex_exit(&releq->smq_mtx);
			goto retry_queue;
		}
		if (releq->smq_free == NULL) {
			/*
			 * This freelist is empty.  This should not
			 * happen unless clients fail to release the
			 * segmap window after accessing the data.
			 * Before sleeping, try the next list of the
			 * same color.
			 */
			free_ndx = (free_ndx + smd_ncolor) & smd_freemsk;
			if (free_ndx != end_ndx) {
				mutex_exit(&releq->smq_mtx);
				mutex_exit(&allocq->smq_mtx);
				sm = &smd_free[free_ndx];
				goto retry_queue;
			}
			/*
			 * Tried all freelists of this color once;
			 * wait on this list (waiters always sleep on
			 * sm_freeq[0]'s mutex) for something to be
			 * freed.
			 */
			segmapcnt.smp_get_nofree.value.ul++;
			sm->sm_want++;
			mutex_exit(&sm->sm_freeq[1].smq_mtx);
			cv_wait(&sm->sm_free_cv,
			    &sm->sm_freeq[0].smq_mtx);
			sm->sm_want--;
			mutex_exit(&sm->sm_freeq[0].smq_mtx);
			sm = &smd_free[free_ndx];
			goto retry_queue;
		} else {
			/*
			 * The release queue has slots: swap the alloc
			 * and release queues and retry.  If we had
			 * skipped slots due to a busy kpm page, back
			 * off briefly so the page lock holder can run.
			 */
			sm->sm_allocq = releq;
			sm->sm_releq = allocq;
			mutex_exit(&allocq->smq_mtx);
			mutex_exit(&releq->smq_mtx);
			if (page_locked) {
				delay(hz >> 2);
				page_locked = 0;
			}
			goto retry_queue;
		}
	} else {
		/*
		 * Fastpath: try to take the first slot's mutex without
		 * blocking, walking the circular list on contention.
		 */
		first = smp;
next_smap:
		smtx = SMAPMTX(smp);
		if (!mutex_tryenter(smtx)) {
			/*
			 * Another thread is reclaiming this slot;
			 * move on to the next, or give up on this
			 * queue once we wrap around.
			 */
			if ((smp = smp->sm_next) == first) {
				goto skip_queue;
			} else {
				goto next_smap;
			}
		} else {
			/*
			 * If a kpm page is cached for this slot, get a
			 * shared lock on it (grab_smp needs the page
			 * locked to map it out).
			 */
			if (segmap_kpm && smp->sm_vp != NULL) {

				kpme = GET_KPME(smp);
				pp = kpme->kpe_page;

				if (pp != NULL) {
					if (!page_trylock(pp, SE_SHARED)) {
						/* page busy; skip slot */
						smp = smp->sm_next;
						mutex_exit(smtx);
						page_locked = 1;

						pp = NULL;

						if (smp == first) {
							goto skip_queue;
						} else {
							goto next_smap;
						}
					} else {
						/* recheck under the lock */
						if (kpme->kpe_page == NULL) {
							page_unlock(pp);
							pp = NULL;
						}
					}
				}
			}

			/*
			 * We've selected smp.  Remove it from the
			 * freelist; if it is the head, advance the
			 * head first.
			 */
			if (first == smp) {
				ASSERT(first == allocq->smq_free);
				allocq->smq_free = smp->sm_next;
			}

			/*
			 * If the head still points at smp, it was the
			 * only slot on the queue.
			 */
			if (allocq->smq_free == smp)
				allocq->smq_free = NULL;
			else {
				smp->sm_prev->sm_next = smp->sm_next;
				smp->sm_next->sm_prev = smp->sm_prev;
			}
			mutex_exit(&allocq->smq_mtx);
			smp->sm_prev = smp->sm_next = NULL;

			/*
			 * If pp != NULL it must be locked here;
			 * grab_smp() drops that page lock.
			 */
			ASSERT((pp == NULL) || PAGE_LOCKED(pp));
			grab_smp(smp, pp);
			/* return smp with its mutex still held */
			ASSERT(SMAPMTX(smp) == smtx);
			ASSERT(MUTEX_HELD(smtx));
			return (smp);
		}
	}
}
/*
 * Create pages (without using VOP_GETPAGE) and load up translations
 * to them.  If softlock is true, set things up so it looks like a
 * call to segmap_fault with F_SOFTLOCK.
 *
 * Returns 1 if a page was created by page_create_va(), 0 otherwise.
 *
 * For a kpm address the page was already created (if needed) in
 * segmap_pagecreate_kpm; just report and clear SM_KPM_NEWPAGE.
 */
int
segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;
	page_t *pp;
	u_offset_t off;
	struct smap *smp;
	struct vnode *vp;
	caddr_t eaddr;
	int newpage = 0;
	uint_t prot;
	kmutex_t *smtx;
	int hat_flag;

	ASSERT(seg->s_as == &kas);

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
		/*
		 * get_smap_kpm returns with the smap mutex held.
		 */
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_pagecreate: smap not found "
			    "for addr %p", (void *)addr);
		}

		smtx = SMAPMTX(smp);
		newpage = smp->sm_flags & SM_KPM_NEWPAGE;
		smp->sm_flags &= ~SM_KPM_NEWPAGE;
		mutex_exit(smtx);

		return (newpage);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;

	eaddr = addr + len;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

	smp = GET_SMAP(seg, addr);

	/*
	 * The smap mutex is not taken here: the slot is assumed to
	 * already have a nonzero refcnt, which prevents it from
	 * changing identity.
	 */
	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
	prot = smd->smd_prot;

	for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
		hat_flag = HAT_LOAD;
		pp = page_lookup(vp, off, SE_SHARED);
		if (pp == NULL) {
			ushort_t bitindex;

			if ((pp = page_create_va(vp, off,
			    PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
				panic("segmap_pagecreate: page_create failed");
			}
			newpage = 1;
			page_io_unlock(pp);

			/*
			 * A freshly-created page holds no valid data
			 * until the caller writes into it, so the page
			 * lock is kept to block other users.  The
			 * translation is also locked so that a write
			 * through the mapping cannot fault (which,
			 * with the page lock held, would deadlock).
			 * Record the softlock in the smap bitmap so
			 * segmap_pageunlock can undo it.
			 */
			bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);

			/*
			 * Large Files: verify the window-relative
			 * offset fits the cast above.
			 */
			ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
			smtx = SMAPMTX(smp);
			mutex_enter(smtx);
			smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
			mutex_exit(smtx);

			hat_flag = HAT_LOAD_LOCK;
		} else if (softlock) {
			hat_flag = HAT_LOAD_LOCK;
		}

		if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
			hat_setmod(pp);

		hat_memload(kas.a_hat, addr, pp, prot, hat_flag);

		if (hat_flag != HAT_LOAD_LOCK)
			page_unlock(pp);

		TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
		    "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
		    seg, addr, pp, vp, off);
	}

	return (newpage);
}
/*
 * Unlock the pages and translations that were locked by a prior
 * segmap_pagecreate() over [addr, addr + len).  Only pages whose
 * softlock bit is set in the smap bitmap are touched; ref/mod bits
 * are set according to rw.  For kpm addresses this is a no-op (pages
 * stay locked until segmap_release).
 */
void
segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
	struct smap *smp;
	ushort_t bitmask;
	page_t *pp;
	struct vnode *vp;
	u_offset_t off;
	caddr_t eaddr;
	kmutex_t *smtx;

	ASSERT(seg->s_as == &kas);

	eaddr = addr + len;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

	if (segmap_kpm && IS_KPM_ADDR(addr)) {
#ifdef	DEBUG
		/* get_smap_kpm returns with the smap mutex held */
		if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
			panic("segmap_pageunlock: smap not found "
			    "for addr %p", (void *)addr);
		}

		ASSERT(smp->sm_refcnt > 0);
		mutex_exit(SMAPMTX(smp));
#endif
		return;
	}

	smp = GET_SMAP(seg, addr);
	smtx = SMAPMTX(smp);

	ASSERT(smp->sm_refcnt > 0);

	vp = smp->sm_vp;
	off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));

	for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
		bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);

		/*
		 * Large Files: the following assertion verifies the
		 * correctness of the cast to (int) above.
		 */
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);

		/*
		 * If the bit corresponding to "off" is set, clear it,
		 * unlock the translation, and release the page lock
		 * taken in segmap_pagecreate.
		 */
		if (smp->sm_bitmap & bitmask) {
			mutex_enter(smtx);
			smp->sm_bitmap &= ~bitmask;
			mutex_exit(smtx);

			hat_unlock(kas.a_hat, addr, PAGESIZE);

			/*
			 * Use page_find() instead of page_lookup()
			 * since the page is known to be locked.
			 */
			pp = page_find(vp, off);
			if (pp == NULL) {
				panic("segmap_pageunlock: page not found");
			}
			if (rw == S_WRITE) {
				hat_setrefmod(pp);
			} else if (rw != S_OTHER) {
				hat_setref(pp);
			}

			page_unlock(pp);
		}
	}
}
/*
 * Map the MAXBSIZE window of vp at off into segmap and return the
 * base address.  Convenience wrapper over segmap_getmapflt with no
 * forced fault-in and S_OTHER access.
 */
caddr_t
segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
{
	return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
}
/* NOTE(review): appears unused in this file — confirm before removing. */
#define	ELF_OFFZERO_VA	(0x10000)

/*
 * Acquire a mapping for the MAXBSIZE window of vp containing
 * [off, off + len) and return its base address.
 *
 * A matching smap is looked up in the (vp, off) hash and reclaimed
 * or reused; otherwise a fresh slot is taken from the free lists
 * (colored by file offset) and hashed in.  When segkpm is usable the
 * page is obtained (fast path via the cached kpm page, else
 * VOP_GETPAGE) and a kpm address is returned; otherwise the segmap
 * window address is returned, optionally prefaulting translations
 * when forcefault is set.
 */
caddr_t
segmap_getmapflt(
	struct seg *seg,
	struct vnode *vp,
	u_offset_t off,
	size_t len,
	int forcefault,
	enum seg_rw rw)
{
	struct smap *smp, *nsmp;
	extern struct vnode *common_specvp();
	caddr_t baseaddr;			/* MAXBSIZE aligned */
	u_offset_t baseoff;
	int newslot;
	caddr_t vaddr;
	int color, hashid;
	kmutex_t *hashmtx, *smapmtx;
	struct smfree *sm;
	page_t *pp;
	struct kpme *kpme;
	uint_t prot;
	caddr_t base;
	page_t *pl[MAXPPB + 1];
	int error;
	int is_kpm = 1;

	ASSERT(seg->s_as == &kas);
	ASSERT(seg == segkmap);

	baseoff = off & (offset_t)MAXBMASK;
	if (off + len > baseoff + MAXBSIZE) {
		panic("segmap_getmap bad len");
	}

	/*
	 * If this is a block device, be sure to use the "common"
	 * block device vnode for the mapping.
	 */
	if (vp->v_type == VBLK)
		vp = common_specvp(vp);

	smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;

	if (segmap_kpm == 0 ||
	    (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
		is_kpm = 0;
	}

	SMAP_HASHFUNC(vp, off, hashid);	/* macro assigns hashid */
	hashmtx = SHASHMTX(hashid);

retry_hash:
	mutex_enter(hashmtx);
	for (smp = smd_hash[hashid].sh_hash_list;
	    smp != NULL; smp = smp->sm_hash)
		if (smp->sm_vp == vp && smp->sm_off == baseoff)
			break;
	mutex_exit(hashmtx);

vrfy_smp:
	if (smp != NULL) {

		ASSERT(vp->v_count != 0);

		/*
		 * Get the smap lock and recheck the tag.  Dropping the
		 * hash lock first is safe because the (vp, off) tag
		 * cannot change while the smap mutex is held.
		 */
		smapmtx = SMAPMTX(smp);
		mutex_enter(smapmtx);
		if (smp->sm_vp != vp || smp->sm_off != baseoff) {
			mutex_exit(smapmtx);
			goto retry_hash;
		}

		if (smp->sm_refcnt == 0) {

			smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;

			/*
			 * The slot could still be on the free list, or
			 * it may be in the middle of being stolen (an
			 * unlocked smp with refcnt 0 that is no longer
			 * on the free list).  The only way to tell is
			 * to check the freelist linkage.
			 */
			if ((smp->sm_next != NULL)) {
				struct sm_freeq *freeq;

				ASSERT(smp->sm_prev != NULL);
				sm = &smd_free[smp->sm_free_ndx];

				/* SM_QNDX_ZERO records which queue */
				if (smp->sm_flags & SM_QNDX_ZERO)
					freeq = &sm->sm_freeq[0];
				else
					freeq = &sm->sm_freeq[1];

				mutex_enter(&freeq->smq_mtx);
				if (freeq->smq_free != smp) {
					/*
					 * fastpath normal case
					 */
					smp->sm_prev->sm_next = smp->sm_next;
					smp->sm_next->sm_prev = smp->sm_prev;
				} else if (smp == smp->sm_next) {
					/*
					 * Taking the last smap on freelist
					 */
					freeq->smq_free = NULL;
				} else {
					/*
					 * Reclaiming 1st smap on list
					 */
					freeq->smq_free = smp->sm_next;
					smp->sm_prev->sm_next = smp->sm_next;
					smp->sm_next->sm_prev = smp->sm_prev;
				}
				mutex_exit(&freeq->smq_mtx);
				smp->sm_prev = smp->sm_next = NULL;
			} else {
				ASSERT(smp->sm_prev == NULL);
				segmapcnt.smp_stolen.value.ul++;
			}

		} else {
			segmapcnt.smp_get_use.value.ul++;
		}
		smp->sm_refcnt++;		/* another user */

		/*
		 * segmap_fault is not invoked via TLB miss, so note
		 * the intended access now; ref/mod bits are set from
		 * these flags at release time (for S_OTHER they are
		 * set in segmap_fault F_SOFTUNLOCK instead).
		 */
		if (is_kpm) {
			if (rw == S_WRITE) {
				smp->sm_flags |= SM_WRITE_DATA;
			} else if (rw == S_READ) {
				smp->sm_flags |= SM_READ_DATA;
			}
		}
		mutex_exit(smapmtx);

		newslot = 0;
	} else {

		uint32_t free_ndx, *free_ndxp;
		union segmap_cpu *scpu;

		/*
		 * Pick the freelist color by file offset (on machines
		 * without virtual-address-cache aliasing smd_colormsk
		 * is zero and this is moot), then advance this CPU's
		 * per-color rotor to spread load over the lists.
		 */
		color = (baseoff >> MAXBSHIFT) & smd_colormsk;
		scpu = smd_cpu+CPU->cpu_seqid;
		free_ndxp = &scpu->scpu.scpu_free_ndx[color];
		free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
#ifdef DEBUG
		colors_used[free_ndx]++;
#endif /* DEBUG */

		/*
		 * Get a locked smp slot from the free list.
		 */
		smp = get_free_smp(free_ndx);
		smapmtx = SMAPMTX(smp);

		ASSERT(smp->sm_vp == NULL);

		if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
			/*
			 * Failed to hash in; one now exists.
			 * Return our slot to the free list and use the
			 * existing one instead.
			 */
			segmap_smapadd(smp);
			mutex_exit(smapmtx);

			smp = nsmp;
			goto vrfy_smp;
		}
		smp->sm_refcnt++;		/* another user */

		/*
		 * As above: record the intended access so ref/mod
		 * bits can be set without a TLB-miss fault path.
		 */
		if (is_kpm) {
			if (rw == S_WRITE) {
				smp->sm_flags |= SM_WRITE_DATA;
			} else if (rw == S_READ) {
				smp->sm_flags |= SM_READ_DATA;
			}
		}
		mutex_exit(smapmtx);

		newslot = 1;
	}

	if (!is_kpm)
		goto use_segmap_range;

	/*
	 * Use segkpm.
	 */
	ASSERT(PAGESIZE == MAXBSIZE);

	/*
	 * Remember the last smp faulted on this cpu (fast path for
	 * get_smap_kpm).
	 */
	(smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;

	if (forcefault == SM_PAGECREATE) {
		baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
		return (baseaddr);
	}

	if (newslot == 0 &&
	    (pp = GET_KPME(smp)->kpe_page) != NULL) {

		/* fastpath: reuse the cached kpm page if still valid */
		switch (rw) {
		case S_READ:
		case S_WRITE:
			if (page_trylock(pp, SE_SHARED)) {
				if (PP_ISFREE(pp) ||
				    !(pp->p_vnode == vp &&
				    pp->p_offset == baseoff)) {
					page_unlock(pp);
					pp = page_lookup(vp, baseoff,
					    SE_SHARED);
				}
			} else {
				pp = page_lookup(vp, baseoff, SE_SHARED);
			}

			if (pp == NULL) {
				ASSERT(GET_KPME(smp)->kpe_page == NULL);
				break;
			}

			if (rw == S_WRITE &&
			    hat_page_getattr(pp, P_MOD | P_REF) !=
			    (P_MOD | P_REF)) {
				page_unlock(pp);
				break;
			}

			/*
			 * We hold p_selock as reader, so grab_smp
			 * can't hit us; the smap refcnt is bumped and
			 * hat_pageunload needs p_selock exclusive.
			 */
			kpme = GET_KPME(smp);
			if (kpme->kpe_page == pp) {
				baseaddr = hat_kpm_page2va(pp, 0);
			} else if (kpme->kpe_page == NULL) {
				baseaddr = hat_kpm_mapin(pp, kpme);
			} else {
				panic("segmap_getmapflt: stale "
				    "kpme page, kpme %p", (void *)kpme);
			}

			/*
			 * segmap_fault is not invoked via TLB miss,
			 * so set the ref bit in advance.
			 */
			if (rw == S_READ && !hat_isref(pp))
				hat_setref(pp);

			return (baseaddr);
		default:
			break;
		}
	}

	base = segkpm_create_va(baseoff);
	error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
	    seg, base, rw, CRED(), NULL);

	pp = pl[0];
	if (error || pp == NULL) {
		/*
		 * Fall back to a segmap window address and let
		 * segmap_fault deal with the error cases; no error
		 * return is possible here.
		 */
		goto use_segmap_range;
	}

	ASSERT(pl[1] == NULL);

	/*
	 * When prot comes back without PROT_ALL, the returned page is
	 * not (fully) backed by fs blocks; for SM_LOCKPROTO callers
	 * fall back to the segmap window and let segmap_fault do the
	 * error return.
	 */
	if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
		ASSERT(rw != S_WRITE);
		ASSERT(PAGE_LOCKED(pp));
		page_unlock(pp);
		forcefault = 0;
		goto use_segmap_range;
	}

	/*
	 * We hold p_selock as reader, so grab_smp can't hit us; the
	 * smap refcnt is bumped and hat_pageunload needs p_selock
	 * exclusive.
	 */
	kpme = GET_KPME(smp);
	if (kpme->kpe_page == pp) {
		baseaddr = hat_kpm_page2va(pp, 0);
	} else if (kpme->kpe_page == NULL) {
		baseaddr = hat_kpm_mapin(pp, kpme);
	} else {
		panic("segmap_getmapflt: stale kpme page after "
		    "VOP_GETPAGE, kpme %p", (void *)kpme);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;

	return (baseaddr);

use_segmap_range:
	baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
	TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
	    "segmap_getmap:seg %p addr %p vp %p offset %llx",
	    seg, baseaddr, vp, baseoff);

	/*
	 * Prefault the translations.
	 */
	vaddr = baseaddr + (off - baseoff);
	if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {

		caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
		    (uintptr_t)PAGEMASK);

		(void) segmap_fault(kas.a_hat, seg, pgaddr,
		    (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
		    F_INVAL, rw);
	}

	return (baseaddr);
}
/*
 * Release the mapping for [addr, addr + MAXBSIZE) obtained from
 * segmap_getmap{,flt}.  The SM_* flags are translated into B_* flags
 * for VOP_PUTPAGE (SM_FREE/SM_DONTNEED only on last release).  When
 * the refcnt drops to zero the smap slot is returned to its free
 * list, after being unhashed and unmapped for SM_INVAL/SM_DESTROY.
 *
 * Returns the VOP_PUTPAGE error, or 0 when no pageout was requested.
 */
int
segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
{
	struct smap *smp;
	int error;
	int bflags = 0;
	struct vnode *vp;
	u_offset_t offset;
	kmutex_t *smtx;
	int is_kpm = 0;
	page_t *pp;

	if (segmap_kpm && IS_KPM_ADDR(addr)) {

		if (((uintptr_t)addr & MAXBOFFSET) != 0) {
			panic("segmap_release: addr %p not "
			    "MAXBSIZE aligned", (void *)addr);
		}

		/* get_smap_kpm() returns with the smap mutex held */
		if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
			panic("segmap_release: smap not found "
			    "for addr %p", (void *)addr);
		}

		TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
		    "segmap_relmap:seg %p addr %p smp %p",
		    seg, addr, smp);

		smtx = SMAPMTX(smp);

		/*
		 * segmap_pagecreate_kpm sets SM_KPM_NEWPAGE so that a
		 * following segmap_pagecreate can report "newpage".
		 * When segmap_pagecreate was never called, clear it
		 * now.
		 */
		smp->sm_flags &= ~SM_KPM_NEWPAGE;
		is_kpm = 1;
		/* apply the access recorded at getmap time */
		if (smp->sm_flags & SM_WRITE_DATA) {
			hat_setrefmod(pp);
		} else if (smp->sm_flags & SM_READ_DATA) {
			hat_setref(pp);
		}
	} else {
		if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
		    ((uintptr_t)addr & MAXBOFFSET) != 0) {
			panic("segmap_release: bad addr %p", (void *)addr);
		}
		smp = GET_SMAP(seg, addr);

		TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
		    "segmap_relmap:seg %p addr %p smp %p",
		    seg, addr, smp);

		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		smp->sm_flags |= SM_NOTKPM_RELEASED;
	}

	ASSERT(smp->sm_refcnt > 0);

	/*
	 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
	 * are set.
	 */
	if ((flags & ~SM_DONTNEED) != 0) {
		if (flags & SM_WRITE)
			segmapcnt.smp_rel_write.value.ul++;
		if (flags & SM_ASYNC) {
			bflags |= B_ASYNC;
			segmapcnt.smp_rel_async.value.ul++;
		}
		if (flags & SM_INVAL) {
			bflags |= B_INVAL;
			segmapcnt.smp_rel_abort.value.ul++;
		}
		if (flags & SM_DESTROY) {
			bflags |= (B_INVAL|B_TRUNC);
			segmapcnt.smp_rel_abort.value.ul++;
		}
		if (smp->sm_refcnt == 1) {
			/*
			 * Only bother with FREE and DONTNEED when no
			 * one else still references this mapping.
			 */
			if (flags & SM_FREE) {
				bflags |= B_FREE;
				segmapcnt.smp_rel_free.value.ul++;
			}
			if (flags & SM_DONTNEED) {
				bflags |= B_DONTNEED;
				segmapcnt.smp_rel_dontneed.value.ul++;
			}
		}
	} else {
		smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
	}

	/* snapshot identity before the slot can be reused */
	vp = smp->sm_vp;
	offset = smp->sm_off;

	if (--smp->sm_refcnt == 0) {

		smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);

		if (flags & (SM_INVAL|SM_DESTROY)) {
			segmap_hashout(smp);	/* remove map info */
			if (is_kpm) {
				hat_kpm_mapout(pp, GET_KPME(smp), addr);
				if (smp->sm_flags & SM_NOTKPM_RELEASED) {
					smp->sm_flags &= ~SM_NOTKPM_RELEASED;
					hat_unload(kas.a_hat, segkmap->s_base +
					    ((smp - smd_smap) * MAXBSIZE),
					    MAXBSIZE, HAT_UNLOAD);
				}

			} else {
				if (segmap_kpm)
					segkpm_mapout_validkpme(GET_KPME(smp));

				smp->sm_flags &= ~SM_NOTKPM_RELEASED;
				hat_unload(kas.a_hat, addr, MAXBSIZE,
				    HAT_UNLOAD);
			}
		}
		segmap_smapadd(smp);	/* return slot to its free list */
	}

	mutex_exit(smtx);

	if (is_kpm)
		page_unlock(pp);
	/*
	 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
	 * are set.
	 */
	if ((flags & ~SM_DONTNEED) != 0) {
		error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
		    bflags, CRED(), NULL);
	} else {
		error = 0;
	}

	return (error);
}
/*
 * Dump the pages belonging to this segmap segment: for every in-use
 * smap window, add each resident page to the crash dump.
 */
static void
segmap_dump(struct seg *seg)
{
	struct segmap_data *smd;
	struct smap *smp, *smp_end;
	page_t *pp;
	pfn_t pfn;
	u_offset_t off;
	caddr_t addr;

	smd = (struct segmap_data *)seg->s_data;
	addr = seg->s_base;
	for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
	    smp < smp_end; smp++) {

		if (smp->sm_refcnt) {
			for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
				int we_own_it = 0;

				/*
				 * page_lookup_nowait returns NULL if
				 * the page is exclusively locked, so
				 * fall back to page_exists to at
				 * least find it (without a lock).
				 */
				if ((pp = page_lookup_nowait(smp->sm_vp,
				    smp->sm_off + off, SE_SHARED)))
					we_own_it = 1;
				else
					pp = page_exists(smp->sm_vp,
					    smp->sm_off + off);

				if (pp) {
					pfn = page_pptonum(pp);
					dump_addpage(seg->s_as,
					    addr + off, pfn);
					if (we_own_it)
						page_unlock(pp);
				}
				/* reset the dump watchdog */
				dump_timeleft = dump_timeout;
			}
		}
		addr += MAXBSIZE;
	}
}
/*
 * Page locking is not supported for segmap segments.
 */
static int
segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
/*
 * Build a memory id for addr from the first smap slot's vnode and
 * the offset of addr within the segment.
 */
static int
segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segmap_data *smd;

	smd = (struct segmap_data *)seg->s_data;
	memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
	memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
	return (0);
}
/*
 * segmap has no lgroup memory allocation policy; always report none.
 */
static lgrp_mem_policy_info_t *
segmap_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
/*
 * segmap advertises no optional segment capabilities.
 */
static int
segmap_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
#ifdef	SEGKPM_SUPPORT

/*
 * Find or create the page for (vp, off) and map it via segkpm.
 * smp must already be referenced by the caller.  When a page has to
 * be created, SM_KPM_NEWPAGE is set so that a following
 * segmap_pagecreate() can report it as a new page.
 *
 * Returns the kpm virtual address of the (share-locked) page.
 */
static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	caddr_t base;
	page_t *pp;
	int newpage = 0;
	struct kpme *kpme;

	ASSERT(smp->sm_refcnt > 0);

	if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
		kmutex_t *smtx;

		base = segkpm_create_va(off);

		if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
		    seg, base)) == NULL) {
			panic("segmap_pagecreate_kpm: "
			    "page_create failed");
		}

		newpage = 1;
		page_io_unlock(pp);
		ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);

		/*
		 * Mark the slot until the following segmap_pagecreate
		 * or segmap_release clears it.
		 */
		smtx = SMAPMTX(smp);
		mutex_enter(smtx);
		smp->sm_flags |= SM_KPM_NEWPAGE;
		mutex_exit(smtx);
	}

	kpme = GET_KPME(smp);
	if (!newpage && kpme->kpe_page == pp)
		base = hat_kpm_page2va(pp, 0);
	else
		base = hat_kpm_mapin(pp, kpme);

	/*
	 * FS code may decide not to call segmap_pagecreate, and
	 * segmap_fault is not invoked via TLB miss, so set the
	 * ref/mod bits in advance.
	 */
	if (rw == S_WRITE) {
		hat_setrefmod(pp);
	} else {
		ASSERT(rw == S_READ);
		hat_setref(pp);
	}

	smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;

	return (base);
}

/*
 * Find the smap structure corresponding to the kpm addr and return
 * it with its mutex held.  The kpm page at addr must be locked by
 * the caller.  When ppp != NULL, *ppp is set to that page if an smap
 * was found, or to NULL otherwise.
 */
struct smap *
get_smap_kpm(caddr_t addr, page_t **ppp)
{
	struct smap *smp;
	struct vnode *vp;
	u_offset_t offset;
	caddr_t baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
	int hashid;
	kmutex_t *hashmtx;
	page_t *pp;
	union segmap_cpu *scpu;

	pp = hat_kpm_vaddr2page(baseaddr);

	ASSERT(pp && !PP_ISFREE(pp));
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);

	vp = pp->p_vnode;
	offset = pp->p_offset;
	ASSERT(vp != NULL);

	/*
	 * Fast path: assume the last smap used on this cpu is the one
	 * needed.
	 */
	scpu = smd_cpu+CPU->cpu_seqid;
	smp = scpu->scpu.scpu_last_smap;
	mutex_enter(&smp->sm_mtx);
	if (smp->sm_vp == vp && smp->sm_off == offset) {
		ASSERT(smp->sm_refcnt > 0);
	} else {
		/*
		 * Assumption wrong: find the smap on the hash chain.
		 */
		mutex_exit(&smp->sm_mtx);
		SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
		hashmtx = SHASHMTX(hashid);

		mutex_enter(hashmtx);
		smp = smd_hash[hashid].sh_hash_list;
		for (; smp != NULL; smp = smp->sm_hash) {
			if (smp->sm_vp == vp && smp->sm_off == offset)
				break;
		}
		mutex_exit(hashmtx);
		if (smp) {
			mutex_enter(&smp->sm_mtx);
			ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
		}
	}

	if (ppp)
		*ppp = smp ? pp : NULL;

	return (smp);
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	return (NULL);
}

/*ARGSUSED*/
struct smap *
get_smap_kpm(caddr_t addr, page_t **ppp)
{
	return (NULL);
}

#endif	/* SEGKPM_SUPPORT */