#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/ksynch.h>
#include <sys/debug.h>
#include <sys/cyclic.h>
#include <sys/machsystm.h>
#include <sys/vm.h>
#include <sys/machcpuvar.h>
#include <sys/mmu.h>
#include <sys/pte.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/hat_sfmmu.h>
#include <sys/vm_machparam.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc.h>
#include <sys/ldc_impl.h>
extern ldc_soft_state_t *ldcssp;
extern int ldc_max_retries;
extern clock_t ldc_delay;
#ifdef DEBUG
extern int ldcdbg;
#endif
extern void i_ldc_reset(ldc_chan_t *ldcp, boolean_t force_reset);
extern int i_ldc_h2v_error(int h_error);
#ifdef DEBUG
extern void ldcdebug(int64_t id, const char *fmt, ...);
#endif
static int i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle,
uint8_t direction, uint64_t offset, size_t size);
static int i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
uint8_t direction, uint64_t start, uint64_t end);
static int i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
caddr_t *raddr);
static int i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr,
size_t len, uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie,
uint32_t *ccount);
/* Tunable: set to 0 to force all memory binds/maps to LDC_SHADOW_MAP. */
int ldc_shmem_enabled = 1;
/* Tunable: set to 0 to force descriptor-ring binds to LDC_SHADOW_MAP. */
int ldc_dring_shmem_enabled = 1;
/* Minimum negotiated HV API version required before dring shmem is enabled. */
static int ldc_dring_shmem_hv_major = 1;
static int ldc_dring_shmem_hv_minor = 1;
/* Debug override: nonzero enables dring shmem regardless of the HV version. */
static int ldc_dring_shmem_hv_force = 0;
/* Set by i_ldc_mem_set_hsvc_vers() when the negotiated HV version suffices. */
static int ldc_dring_shmem_hv_ok = 0;
/* Number of entries in a channel's export map table (tunable). */
uint64_t ldc_maptable_entries = LDC_MTBL_ENTRIES;
/* Encode a map-table index plus page-size code into an export cookie. */
#define	IDX2COOKIE(idx, pg_szc, pg_shift) \
	(((pg_szc) << LDC_COOKIE_PGSZC_SHIFT) | ((idx) << (pg_shift)))
#define	GIGABYTE ((uint64_t)(1 << 30))
/* NOTE(review): not referenced in this chunk — presumably a lower bound on
 * the direct-map region size used elsewhere; confirm against callers. */
uint64_t ldc_mapin_size_min = GIGABYTE;
/* First HV mapin API version that supports hv_ldc_mapin_size_max(). */
#define	LDC_MAPIN_VER_MAJOR 1
#define	LDC_MAPIN_VER_MINOR 2
/*
 * Record whether the negotiated hypervisor service API version is new
 * enough for descriptor-ring shared memory.  Sets the file-scope flag
 * ldc_dring_shmem_hv_ok when the (major, minor) pair meets the minimum
 * (or when the ldc_dring_shmem_hv_force debug override is set).
 */
void
i_ldc_mem_set_hsvc_vers(uint64_t major, uint64_t minor)
{
	int vers_ok;

	if (major > ldc_dring_shmem_hv_major) {
		/* strictly newer major version */
		vers_ok = 1;
	} else if (major == ldc_dring_shmem_hv_major &&
	    minor >= ldc_dring_shmem_hv_minor) {
		/* same major, sufficient minor */
		vers_ok = 1;
	} else {
		/* too old, unless explicitly forced on */
		vers_ok = (ldc_dring_shmem_hv_force != 0);
	}

	if (vers_ok)
		ldc_dring_shmem_hv_ok = 1;
}
/*
 * Initialize the soft state's mapin size.  Starts from the compiled-in
 * default; if the negotiated mapin API version is at least
 * LDC_MAPIN_VER_MAJOR.LDC_MAPIN_VER_MINOR, queries the hypervisor for
 * the actual maximum mapin size and uses that instead.
 */
void
i_ldc_init_mapin(ldc_soft_state_t *ldcssp, uint64_t major, uint64_t minor)
{
	int hverr;
	uint64_t max_sz;
	uint64_t tbl_type = LDC_MAPIN_TYPE_REGULAR;

	/* Default size, overridden below if the HV supports the query. */
	ldcssp->mapin_size = LDC_DIRECT_MAP_SIZE_DEFAULT;

	/* The size query requires mapin API version 1.2 or newer. */
	if (major < LDC_MAPIN_VER_MAJOR)
		return;
	if (major == LDC_MAPIN_VER_MAJOR && minor < LDC_MAPIN_VER_MINOR)
		return;

	hverr = hv_ldc_mapin_size_max(tbl_type, &max_sz);
	if (hverr != 0) {
		cmn_err(CE_NOTE, "Failed to get mapin information\n");
		return;
	}

	ldcssp->mapin_size = max_sz;

	D1(DBG_ALL_LDCS, "%s: mapin_size read from HV is (0x%llx)\n",
	    __func__, max_sz);
}
/*
 * Allocate a memory handle for the given channel and link it onto the
 * channel's handle list.  The handle starts out LDC_UNBOUND.
 *
 * Returns 0 on success with *mhandle set, or EINVAL if the channel
 * handle is NULL or the channel has not been initialized.
 */
int
ldc_mem_alloc_handle(ldc_handle_t handle, ldc_mem_handle_t *mhandle)
{
	ldc_chan_t *ldcp;
	ldc_mhdl_t *mhdl;

	if (handle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_alloc_handle: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;

	mutex_enter(&ldcp->lock);

	/* check to see if channel is initialized */
	if ((ldcp->tstate & ~TS_IN_RESET) < TS_INIT) {
		DWARN(ldcp->id,
		    "ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
		    ldcp->id);
		mutex_exit(&ldcp->lock);
		return (EINVAL);
	}

	/* allocate handle for channel */
	mhdl = kmem_cache_alloc(ldcssp->memhdl_cache, KM_SLEEP);

	/* initialize the lock */
	mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL);

	mhdl->myshadow = B_FALSE;
	mhdl->memseg = NULL;
	mhdl->ldcp = ldcp;
	mhdl->status = LDC_UNBOUND;

	/*
	 * Insert at the head of the channel's handle list.  (The
	 * original special-cased an empty list, but both branches
	 * reduced to this same unconditional prepend.)
	 */
	mhdl->next = ldcp->mhdl_list;
	ldcp->mhdl_list = mhdl;

	*mhandle = (ldc_mem_handle_t)mhdl;

	mutex_exit(&ldcp->lock);

	D1(ldcp->id, "ldc_mem_alloc_handle: (0x%llx) allocated handle 0x%llx\n",
	    ldcp->id, mhdl);

	return (0);
}
/*
 * Free a memory handle allocated by ldc_mem_alloc_handle().  The
 * handle must not be bound or mapped.  Returns EINVAL if the handle is
 * NULL, still bound/mapped, or cannot be found on its channel's list.
 */
int
ldc_mem_free_handle(ldc_mem_handle_t mhandle)
{
	ldc_mhdl_t *mhdl, *phdl;
	ldc_chan_t *ldcp;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_free_handle: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	ldcp = mhdl->ldcp;

	/* A bound or mapped handle may not be freed. */
	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED) {
		DWARN(ldcp->id,
		    "ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
		    mhdl);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}
	mutex_exit(&mhdl->lock);

	/*
	 * Unlink the handle from the channel's handle list.
	 * NOTE(review): the handle lock is dropped before mlist_lock is
	 * taken, leaving a small window — presumably callers serialize a
	 * free against concurrent use of the same handle; confirm.
	 */
	mutex_enter(&ldcp->mlist_lock);
	phdl = ldcp->mhdl_list;

	if (phdl == mhdl) {
		/* Handle is at the head of the list. */
		ldcp->mhdl_list = mhdl->next;
		mutex_destroy(&mhdl->lock);
		kmem_cache_free(ldcssp->memhdl_cache, mhdl);

		D1(ldcp->id,
		    "ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
		    ldcp->id, mhdl);
	} else {
		/* Walk the list looking for the handle's predecessor. */
		while (phdl != NULL) {
			if (phdl->next == mhdl) {
				phdl->next = mhdl->next;
				mutex_destroy(&mhdl->lock);
				kmem_cache_free(ldcssp->memhdl_cache, mhdl);
				D1(ldcp->id,
				    "ldc_mem_free_handle: (0x%llx) freed "
				    "handle 0x%llx\n", ldcp->id, mhdl);
				break;
			}
			phdl = phdl->next;
		}
	}

	/* phdl is NULL only if the handle was never on the list. */
	if (phdl == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
		mutex_exit(&ldcp->mlist_lock);
		return (EINVAL);
	}

	mutex_exit(&ldcp->mlist_lock);

	return (0);
}
/*
 * Public entry point for binding a virtual address range to a memory
 * handle.  If shared memory support is globally disabled, the map type
 * is forced down to a shadow mapping before handing off to the
 * internal implementation.
 */
int
ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
    uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
{
	uint8_t bind_mtype;

	bind_mtype = (ldc_shmem_enabled == 0) ? LDC_SHADOW_MAP : mtype;

	return (i_ldc_mem_bind_handle(mhandle, vaddr, len, bind_mtype, perm,
	    cookie, ccount));
}
/*
 * Internal implementation of ldc_mem_bind_handle(): bind a virtual
 * address range to a memory handle by allocating one map-table entry
 * (MTE) per page and generating export cookies for the peer.
 * Physically contiguous runs of table slots are coalesced into a
 * single cookie.
 *
 * Returns 0 on success; EINVAL for bad arguments or an already-bound
 * handle; ENOMEM if the map table is full; EIO if the map table could
 * not be registered with the hypervisor.
 */
static int
i_ldc_mem_bind_handle(ldc_mem_handle_t mhandle, caddr_t vaddr, size_t len,
    uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
{
	ldc_mhdl_t *mhdl;
	ldc_chan_t *ldcp;
	ldc_mtbl_t *mtbl;
	ldc_memseg_t *memseg;
	ldc_mte_t tmp_mte;
	uint64_t index, prev_index = 0;
	int64_t cookie_idx;
	uintptr_t raddr, ra_aligned;
	uint64_t psize, poffset, v_offset;
	uint64_t pg_shift, pg_size, pg_size_code, pg_mask;
	pgcnt_t npages;
	caddr_t v_align, addr;
	int i, rv;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_bind_handle: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;
	ldcp = mhdl->ldcp;

	/* clear count */
	*ccount = 0;

	mutex_enter(&mhdl->lock);

	/* A handle may be bound only once. */
	if (mhdl->status == LDC_BOUND || mhdl->memseg != NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_bind_handle: (0x%x) handle already bound\n",
		    mhandle);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	/* Both address and length must be 8-byte aligned. */
	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
		DWARN(ldcp->id,
		    "ldc_mem_bind_handle: addr/size is not 8-byte aligned\n");
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	/*
	 * Lazily allocate the channel's map table on first bind and
	 * register it with the hypervisor.
	 */
	mutex_enter(&ldcp->lock);
	if ((mtbl = ldcp->mtbl) == NULL) {

		mtbl = kmem_zalloc(sizeof (ldc_mtbl_t), KM_SLEEP);

		mtbl->num_entries = mtbl->num_avail = ldc_maptable_entries;
		mtbl->size = ldc_maptable_entries * sizeof (ldc_mte_slot_t);
		mtbl->next_entry = 0;
		mtbl->contigmem = B_TRUE;

		/* Prefer physically contiguous memory for the table. */
		mtbl->table = (ldc_mte_slot_t *)
		    contig_mem_alloc_align(mtbl->size, MMU_PAGESIZE);
		if (mtbl->table == NULL) {

			/* Fall back to one page worth of entries. */
			mtbl->table = kmem_alloc(MMU_PAGESIZE, KM_SLEEP);
			mtbl->size = MMU_PAGESIZE;
			mtbl->contigmem = B_FALSE;
			mtbl->num_entries = mtbl->num_avail =
			    mtbl->size / sizeof (ldc_mte_slot_t);
			DWARN(ldcp->id,
			    "ldc_mem_bind_handle: (0x%llx) reduced tbl size "
			    "to %lx entries\n", ldcp->id, mtbl->num_entries);
		}

		/* zero out the memory */
		bzero(mtbl->table, mtbl->size);

		/* initialize the map table lock */
		mutex_init(&mtbl->lock, NULL, MUTEX_DRIVER, NULL);

		/* register the table with the hypervisor */
		rv = hv_ldc_set_map_table(ldcp->id,
		    va_to_pa(mtbl->table), mtbl->num_entries);
		if (rv != 0) {
			/* Registration failed: undo all allocations. */
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
			    ldcp->id, rv);
			if (mtbl->contigmem)
				contig_mem_free(mtbl->table, mtbl->size);
			else
				kmem_free(mtbl->table, mtbl->size);
			mutex_destroy(&mtbl->lock);
			kmem_free(mtbl, sizeof (ldc_mtbl_t));
			mutex_exit(&ldcp->lock);
			mutex_exit(&mhdl->lock);
			return (EIO);
		}

		ldcp->mtbl = mtbl;

		D1(ldcp->id,
		    "ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
		    ldcp->id, ldcp->mtbl->table);
	}
	mutex_exit(&ldcp->lock);

	/* Compute base page geometry. */
	pg_size = MMU_PAGESIZE;
	pg_size_code = page_szc(pg_size);
	pg_shift = page_get_shift(pg_size_code);
	pg_mask = ~(pg_size - 1);

	D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) binding "
	    "va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);

	/* page-aligned VA and the offset within the first page */
	v_align = (caddr_t)(((uintptr_t)vaddr) & ~(pg_size - 1));
	v_offset = ((uintptr_t)vaddr) & (pg_size - 1);

	/* number of pages spanned, rounding up a partial tail page */
	npages = (len+v_offset)/pg_size;
	npages = ((len+v_offset)%pg_size == 0) ? npages : npages+1;

	D1(ldcp->id, "ldc_mem_bind_handle: binding "
	    "(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
	    ldcp->id, vaddr, v_align, v_offset, npages);

	/* Make sure the map table has room for every page. */
	mutex_enter(&mtbl->lock);
	if (npages > mtbl->num_avail) {
		D1(ldcp->id, "ldc_mem_bind_handle: (0x%llx) no table entries\n",
		    ldcp->id);
		mutex_exit(&mtbl->lock);
		mutex_exit(&mhdl->lock);
		return (ENOMEM);
	}

	/* Allocate the memseg and its per-page / per-cookie arrays. */
	memseg = mhdl->memseg =
	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
	memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
	memseg->cookies =
	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * npages), KM_SLEEP);

	D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) processing 0x%llx pages\n",
	    ldcp->id, npages);

	addr = v_align;
	index = mtbl->next_entry;
	cookie_idx = -1;

	/*
	 * Build the MTE template once; access bits depend on which map
	 * types were requested (direct, shadow, IO — may be combined).
	 */
	tmp_mte.ll = 0;
	if (mtype & LDC_DIRECT_MAP) {
		tmp_mte.mte_r = (perm & LDC_MEM_R) ? 1 : 0;
		tmp_mte.mte_w = (perm & LDC_MEM_W) ? 1 : 0;
		tmp_mte.mte_x = (perm & LDC_MEM_X) ? 1 : 0;
	}
	if (mtype & LDC_SHADOW_MAP) {
		tmp_mte.mte_cr = (perm & LDC_MEM_R) ? 1 : 0;
		tmp_mte.mte_cw = (perm & LDC_MEM_W) ? 1 : 0;
	}
	if (mtype & LDC_IO_MAP) {
		tmp_mte.mte_ir = (perm & LDC_MEM_R) ? 1 : 0;
		tmp_mte.mte_iw = (perm & LDC_MEM_W) ? 1 : 0;
	}
	D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
	tmp_mte.mte_pgszc = pg_size_code;

	/* Fill in one map-table entry per page. */
	for (i = 0; i < npages; i++) {

		/* Find the next free slot, scanning circularly. */
		while (mtbl->table[index].entry.ll != 0) {
			index = (index + 1) % mtbl->num_entries;
			if (index == mtbl->next_entry) {
				/*
				 * Wrapped all the way around: table full.
				 * NOTE(review): the memseg/pages/cookies
				 * allocated above are not freed on this
				 * path — TODO confirm this is intentional.
				 */
				DWARN(DBG_ALL_LDCS,
				    "ldc_mem_bind_handle: (0x%llx) cannot find "
				    "entry\n", ldcp->id);
				*ccount = 0;
				mutex_exit(&mtbl->lock);
				mutex_exit(&mhdl->lock);
				return (ENOMEM);
			}
		}

		/* Store the page-aligned real-address PFN in the MTE. */
		raddr = va_to_pa((void *)addr);
		ra_aligned = ((uintptr_t)raddr & pg_mask);
		tmp_mte.mte_rpfn = ra_aligned >> pg_shift;
		D1(ldcp->id, "ldc_mem_bind_handle mte=0x%llx\n", tmp_mte.ll);
		mtbl->table[index].entry = tmp_mte;
		D2(ldcp->id, "ldc_mem_bind_handle: (0x%llx) stored MTE 0x%llx"
		    " into loc 0x%llx\n", ldcp->id, tmp_mte.ll, index);

		/* Size/offset of the valid data within this page. */
		if (i == 0) {
			/* first page may start mid-page */
			psize = min((pg_size - v_offset), len);
			poffset = v_offset;
		} else if (i == (npages - 1)) {
			/* last page may end mid-page */
			psize = (((uintptr_t)(vaddr + len)) &
			    ((uint64_t)(pg_size-1)));
			if (psize == 0)
				psize = pg_size;
			poffset = 0;
		} else {
			/* middle pages are fully covered */
			psize = pg_size;
			poffset = 0;
		}

		/* record this page in the memseg */
		memseg->pages[i].index = index;
		memseg->pages[i].raddr = raddr;
		memseg->pages[i].mte = &(mtbl->table[index]);

		/*
		 * Start a new cookie unless this table slot immediately
		 * follows the previous one, in which case the current
		 * cookie is simply extended.
		 */
		if (i == 0 || (index != prev_index + 1)) {
			cookie_idx++;
			memseg->cookies[cookie_idx].addr =
			    IDX2COOKIE(index, pg_size_code, pg_shift);
			memseg->cookies[cookie_idx].addr |= poffset;
			memseg->cookies[cookie_idx].size = psize;
		} else {
			memseg->cookies[cookie_idx].size += psize;
		}

		D1(ldcp->id, "ldc_mem_bind_handle: bound "
		    "(0x%llx) va=0x%llx, idx=0x%llx, "
		    "ra=0x%llx(sz=0x%x,off=0x%x)\n",
		    ldcp->id, addr, index, raddr, psize, poffset);

		/* advance to the next page and slot */
		mtbl->num_avail--;
		addr += pg_size;
		prev_index = index;
		index = (index + 1) % mtbl->num_entries;
		mtbl->next_entry = index;
	}

	mutex_exit(&mtbl->lock);

	/* record the bind in the handle and memseg */
	mhdl->mtype = mtype;
	mhdl->perm = perm;
	mhdl->status = LDC_BOUND;

	memseg->vaddr = vaddr;
	memseg->raddr = memseg->pages[0].raddr;
	memseg->size = len;
	memseg->npages = npages;
	memseg->ncookies = cookie_idx + 1;
	memseg->next_cookie = (memseg->ncookies > 1) ? 1 : 0;

	/* return the first cookie; the rest via ldc_mem_nextcookie() */
	*ccount = memseg->ncookies;
	cookie->addr = memseg->cookies[0].addr;
	cookie->size = memseg->cookies[0].size;

	D1(ldcp->id,
	    "ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
	    "pgs=0x%llx cookies=0x%llx\n",
	    ldcp->id, mhdl, vaddr, npages, memseg->ncookies);

	mutex_exit(&mhdl->lock);
	return (0);
}
/*
 * Return the next cookie of a bound memory handle.  Cookies after the
 * first (which ldc_mem_bind_handle() returned) are retrieved one at a
 * time; after the last cookie the iterator wraps to an exhausted state
 * and EINVAL is returned.
 */
int
ldc_mem_nextcookie(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie)
{
	ldc_mhdl_t *mhdl;
	ldc_chan_t *ldcp;
	ldc_memseg_t *memseg;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_nextcookie: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	ldcp = mhdl->ldcp;
	memseg = mhdl->memseg;

	if (cookie == 0) {
		DWARN(ldcp->id,
		    "ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
		    ldcp->id);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	/*
	 * BUGFIX: an unbound handle has no memseg; the previous code
	 * dereferenced memseg unconditionally and would panic on a NULL
	 * pointer if called before ldc_mem_bind_handle().
	 */
	if (memseg == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_nextcookie:(0x%llx) handle not bound\n",
		    ldcp->id);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	if (memseg->next_cookie != 0) {
		cookie->addr = memseg->cookies[memseg->next_cookie].addr;
		cookie->size = memseg->cookies[memseg->next_cookie].size;
		memseg->next_cookie++;
		/* after the last cookie, mark the iterator exhausted */
		if (memseg->next_cookie == memseg->ncookies)
			memseg->next_cookie = 0;
	} else {
		DWARN(ldcp->id,
		    "ldc_mem_nextcookie:(0x%llx) no more cookies\n", ldcp->id);
		cookie->addr = 0;
		cookie->size = 0;
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	D1(ldcp->id,
	    "ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
	    ldcp->id, cookie->addr, cookie->size);

	mutex_exit(&mhdl->lock);
	return (0);
}
/*
 * Unbind a memory handle: clear each of its map-table entries, revoke
 * any cookies the peer has mapped in (via hv_ldc_revoke(), retrying
 * while the hypervisor reports H_EWOULDBLOCK), and free the memseg.
 * Returns EINVAL for a NULL or unbound handle, 0 otherwise.
 */
int
ldc_mem_unbind_handle(ldc_mem_handle_t mhandle)
{
	ldc_mhdl_t *mhdl;
	ldc_chan_t *ldcp;
	ldc_mtbl_t *mtbl;
	ldc_memseg_t *memseg;
	uint64_t cookie_addr;
	uint64_t pg_shift, pg_size_code;
	int i, rv, retries;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_unbind_handle: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	if (mhdl->status == LDC_UNBOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
		    mhandle);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	ldcp = mhdl->ldcp;
	mtbl = ldcp->mtbl;
	memseg = mhdl->memseg;

	/* Release every map-table entry owned by this memseg. */
	mutex_enter(&mtbl->lock);
	for (i = 0; i < memseg->npages; i++) {

		/* Clear the MTE so the export is no longer valid. */
		memseg->pages[i].mte->entry.ll = 0;

		/* If the peer mapped this page in, revoke its cookie. */
		if (memseg->pages[i].mte->cookie) {
			pg_size_code = page_szc(MMU_PAGESIZE);
			pg_shift = page_get_shift(pg_size_code);
			cookie_addr = IDX2COOKIE(memseg->pages[i].index,
			    pg_size_code, pg_shift);
			D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) revoke "
			    "cookie 0x%llx, rcookie 0x%llx\n", ldcp->id,
			    cookie_addr, memseg->pages[i].mte->cookie);

			/* Retry while the hypervisor would block. */
			retries = 0;
			do {
				rv = hv_ldc_revoke(ldcp->id, cookie_addr,
				    memseg->pages[i].mte->cookie);
				if (rv != H_EWOULDBLOCK)
					break;
				drv_usecwait(ldc_delay);
			} while (retries++ < ldc_max_retries);

			if (rv) {
				/* Revoke failed: log and continue. */
				DWARN(ldcp->id,
				    "ldc_mem_unbind_handle: (0x%llx) cannot "
				    "revoke mapping, cookie %llx\n", ldcp->id,
				    cookie_addr);
			}
		}

		/* This table slot is available again. */
		mtbl->num_avail++;
	}
	mutex_exit(&mtbl->lock);

	/* Free the page/cookie arrays and the memseg itself. */
	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
	kmem_free(memseg->cookies,
	    (sizeof (ldc_mem_cookie_t) * memseg->npages));
	kmem_cache_free(ldcssp->memseg_cache, memseg);

	mhdl->memseg = NULL;
	mhdl->status = LDC_UNBOUND;

	D1(ldcp->id, "ldc_mem_unbind_handle: (0x%llx) unbound handle 0x%llx\n",
	    ldcp->id, mhdl);

	mutex_exit(&mhdl->lock);
	return (0);
}
/*
 * Report status information about a memory handle.  Always fills in
 * minfo->status; the address/type/permission fields are valid only
 * when the handle is currently bound or mapped.
 */
int
ldc_mem_info(ldc_mem_handle_t mhandle, ldc_mem_info_t *minfo)
{
	ldc_mhdl_t *mhdl;
	boolean_t active;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid memory handle\n");
		return (EINVAL);
	}
	if (minfo == NULL) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_info: invalid args\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	minfo->status = mhdl->status;

	active = (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED);
	if (active) {
		/* segment details are meaningful only while active */
		minfo->vaddr = mhdl->memseg->vaddr;
		minfo->raddr = mhdl->memseg->raddr;
		minfo->mtype = mhdl->mtype;
		minfo->perm = mhdl->perm;
	}

	mutex_exit(&mhdl->lock);

	return (0);
}
/*
 * Copy data between a local 8-byte aligned buffer and exported peer
 * memory described by a set of cookies, starting at the given offset
 * into the export.  The copy is performed one page-bounded chunk at a
 * time with hv_ldc_copy(); `direction' is LDC_COPY_IN or LDC_COPY_OUT.
 * On failure *size is updated to the number of bytes actually copied,
 * and the channel link state is checked (resetting the channel and
 * returning ECONNRESET if it went down).
 */
int
ldc_mem_copy(ldc_handle_t handle, caddr_t vaddr, uint64_t off, size_t *size,
    ldc_mem_cookie_t *cookies, uint32_t ccount, uint8_t direction)
{
	ldc_chan_t *ldcp;
	uint64_t local_voff, local_valign;
	uint64_t cookie_addr, cookie_size;
	uint64_t pg_shift, pg_size, pg_size_code;
	uint64_t export_caddr, export_poff, export_psize, export_size;
	uint64_t local_ra, local_poff, local_psize;
	uint64_t copy_size, copied_len = 0, total_bal = 0, idx = 0;
	pgcnt_t npages;
	size_t len = *size;
	int i, rv = 0;

	uint64_t chid;

	if (handle == 0) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_copy: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;
	chid = ldcp->id;

	/* the channel must be fully UP to copy */
	if (ldcp->tstate != TS_UP) {
		DWARN(chid, "ldc_mem_copy: (0x%llx) channel is not UP\n",
		    chid);
		return (ECONNRESET);
	}

	/* address and size must be 8-byte aligned */
	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
		DWARN(chid,
		    "ldc_mem_copy: addr/sz is not 8-byte aligned\n");
		return (EINVAL);
	}

	/* total size of the export = sum of all cookie sizes */
	export_size = 0;
	for (i = 0; i < ccount; i++)
		export_size += cookies[i].size;

	/* the start offset must fall within the export */
	if (off > export_size) {
		DWARN(chid,
		    "ldc_mem_copy: (0x%llx) start offset > export mem size\n",
		    chid);
		return (EINVAL);
	}

	/* the requested copy must fit in the export past the offset */
	if ((export_size - off) < *size) {
		DWARN(chid,
		    "ldc_mem_copy: (0x%llx) copy size > export mem size\n",
		    chid);
		return (EINVAL);
	}

	/* total number of bytes still to be copied */
	total_bal = min(export_size, *size);

	/* base page geometry */
	pg_size = MMU_PAGESIZE;
	pg_size_code = page_szc(pg_size);
	pg_shift = page_get_shift(pg_size_code);

	D1(chid, "ldc_mem_copy: copying data "
	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
	    chid, vaddr, pg_size, pg_size_code, pg_shift);

	/* page-aligned local VA and its in-page offset */
	local_valign = (((uintptr_t)vaddr) & ~(pg_size - 1));
	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);

	/* pages spanned by the local buffer, rounding up the tail */
	npages = (len+local_voff)/pg_size;
	npages = ((len+local_voff)%pg_size == 0) ? npages : npages+1;

	D1(chid,
	    "ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
	    chid, vaddr, local_valign, local_voff, npages);

	/* first local page: real address, offset and usable size */
	local_ra = va_to_pa((void *)local_valign);
	local_poff = local_voff;
	local_psize = min(len, (pg_size - local_voff));

	len -= local_psize;

	/* find the cookie containing the starting offset */
	for (idx = 0; idx < ccount; idx++) {
		cookie_size = cookies[idx].size;
		if (off < cookie_size)
			break;
		off -= cookie_size;
	}

	cookie_addr = cookies[idx].addr + off;
	cookie_size = cookies[idx].size - off;

	/* first export page: aligned cookie addr, offset, usable size */
	export_caddr = cookie_addr & ~(pg_size - 1);
	export_poff = cookie_addr & (pg_size - 1);
	export_psize = min(cookie_size, (pg_size - export_poff));

	for (;;) {

		/* copy the overlap of the current local/export pages */
		copy_size = min(export_psize, local_psize);

		D1(chid,
		    "ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
		    " loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
		    " exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
		    " total_bal=0x%llx\n",
		    chid, direction, export_caddr, local_ra, export_poff,
		    local_poff, export_psize, local_psize, copy_size,
		    total_bal);

		rv = hv_ldc_copy(chid, direction,
		    (export_caddr + export_poff), (local_ra + local_poff),
		    copy_size, &copied_len);

		if (rv != 0) {
			int error = EIO;
			uint64_t rx_hd, rx_tl;

			DWARN(chid,
			    "ldc_mem_copy: (0x%llx) err %d during copy\n",
			    (unsigned long long)chid, rv);
			DWARN(chid,
			    "ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
			    "loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
			    " exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
			    " copied_len=0x%lx, total_bal=0x%lx\n",
			    chid, direction, export_caddr, local_ra,
			    export_poff, local_poff, export_psize, local_psize,
			    copy_size, copied_len, total_bal);

			/* report how many bytes actually made it across */
			*size = *size - total_bal;

			/*
			 * Check whether the failure was due to a channel
			 * reset; take both locks in case we must reset.
			 */
			mutex_enter(&ldcp->lock);
			mutex_enter(&ldcp->tx_lock);

			rv = hv_ldc_rx_get_state(ldcp->id,
			    &rx_hd, &rx_tl, &(ldcp->link_state));
			if (ldcp->link_state == LDC_CHANNEL_DOWN ||
			    ldcp->link_state == LDC_CHANNEL_RESET) {
				i_ldc_reset(ldcp, B_FALSE);
				error = ECONNRESET;
			}

			mutex_exit(&ldcp->tx_lock);
			mutex_exit(&ldcp->lock);

			return (error);
		}

		ASSERT(copied_len <= copy_size);

		/* advance all offsets/balances by what was copied */
		D2(chid, "ldc_mem_copy: copied=0x%llx\n", copied_len);
		export_poff += copied_len;
		local_poff += copied_len;
		export_psize -= copied_len;
		local_psize -= copied_len;
		cookie_size -= copied_len;

		total_bal -= copied_len;

		/* partial copy: retry the remainder of this chunk */
		if (copy_size != copied_len)
			continue;

		/* export page consumed: move to next page or cookie */
		if (export_psize == 0 && total_bal != 0) {

			if (cookie_size == 0) {
				/* cookie exhausted: start the next one */
				idx++;
				cookie_addr = cookies[idx].addr;
				cookie_size = cookies[idx].size;

				export_caddr = cookie_addr & ~(pg_size - 1);
				export_poff = cookie_addr & (pg_size - 1);
				export_psize =
				    min(cookie_size, (pg_size-export_poff));
			} else {
				/* next page of the same cookie */
				export_caddr += pg_size;
				export_poff = 0;
				export_psize = min(cookie_size, pg_size);
			}
		}

		/* local page consumed: move to the next local page */
		if (local_psize == 0 && total_bal != 0) {
			local_valign += pg_size;
			local_ra = va_to_pa((void *)local_valign);
			local_poff = 0;
			local_psize = min(pg_size, len);
			len -= local_psize;
		}

		/* all requested bytes copied */
		if (total_bal == 0)
			break;
	}

	D1(chid,
	    "ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
	    chid, *size);

	return (0);
}
/*
 * Copy data between a local 8-byte aligned buffer and a single target
 * real-address cookie (paddr) on the peer, page-bounded chunk by
 * chunk, via hv_ldc_copy().  Unlike ldc_mem_copy() this operates on
 * one raw cookie rather than an exported cookie list.  On HV error,
 * *size is updated to the bytes actually copied and the converted
 * error is returned.
 */
int
ldc_mem_rdwr_cookie(ldc_handle_t handle, caddr_t vaddr, size_t *size,
    caddr_t paddr, uint8_t direction)
{
	ldc_chan_t *ldcp;
	uint64_t local_voff, local_valign;
	uint64_t pg_shift, pg_size, pg_size_code;
	uint64_t target_pa, target_poff, target_psize, target_size;
	uint64_t local_ra, local_poff, local_psize;
	uint64_t copy_size, copied_len = 0;
	pgcnt_t npages;
	size_t len = *size;
	int rv = 0;

	if (handle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_rdwr_cookie: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;

	mutex_enter(&ldcp->lock);

	/* the channel must be fully UP to copy */
	if (ldcp->tstate != TS_UP) {
		DWARN(ldcp->id,
		    "ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
		    ldcp->id);
		mutex_exit(&ldcp->lock);
		return (ECONNRESET);
	}

	/* address and size must be 8-byte aligned */
	if ((((uintptr_t)vaddr | len) & 0x7) != 0) {
		DWARN(ldcp->id,
		    "ldc_mem_rdwr_cookie: addr/size is not 8-byte aligned\n");
		mutex_exit(&ldcp->lock);
		return (EINVAL);
	}

	/* bytes remaining to be copied */
	target_size = *size;

	/* base page geometry */
	pg_size = MMU_PAGESIZE;
	pg_size_code = page_szc(pg_size);
	pg_shift = page_get_shift(pg_size_code);

	D1(ldcp->id, "ldc_mem_rdwr_cookie: copying data "
	    "(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
	    ldcp->id, vaddr, pg_size, pg_size_code, pg_shift);

	/* page-aligned local VA and in-page offset */
	local_valign = ((uintptr_t)vaddr) & ~(pg_size - 1);
	local_voff = ((uintptr_t)vaddr) & (pg_size - 1);

	/* pages spanned by the local buffer, rounding up the tail */
	npages = (len + local_voff) / pg_size;
	npages = ((len + local_voff) % pg_size == 0) ? npages : npages+1;

	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) v=0x%llx, "
	    "val=0x%llx,off=0x%x,pgs=0x%x\n",
	    ldcp->id, vaddr, local_valign, local_voff, npages);

	/* first local page: real address, offset and usable size */
	local_ra = va_to_pa((void *)local_valign);
	local_poff = local_voff;
	local_psize = min(len, (pg_size - local_voff));

	len -= local_psize;

	/* first target page: aligned PA, offset and usable size */
	target_pa = ((uintptr_t)paddr) & ~(pg_size - 1);
	target_poff = ((uintptr_t)paddr) & (pg_size - 1);
	target_psize = pg_size - target_poff;

	for (;;) {

		/* copy the overlap of the current local/target pages */
		copy_size = min(target_psize, local_psize);

		D1(ldcp->id,
		    "ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
		    " loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
		    " tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
		    " total_bal=0x%llx\n",
		    ldcp->id, direction, target_pa, local_ra, target_poff,
		    local_poff, target_psize, local_psize, copy_size,
		    target_size);

		rv = hv_ldc_copy(ldcp->id, direction,
		    (target_pa + target_poff), (local_ra + local_poff),
		    copy_size, &copied_len);

		if (rv != 0) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
			    ldcp->id, rv);
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
			    "tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
			    "loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
			    "copy_sz=0x%llx, total_bal=0x%llx\n",
			    ldcp->id, direction, target_pa, local_ra,
			    target_poff, local_poff, target_psize, local_psize,
			    copy_size, target_size);

			/* report bytes actually copied before failure */
			*size = *size - target_size;
			mutex_exit(&ldcp->lock);
			return (i_ldc_h2v_error(rv));
		}

		/* advance all offsets/balances by what was copied */
		D2(ldcp->id, "ldc_mem_rdwr_cookie: copied=0x%llx\n",
		    copied_len);
		target_poff += copied_len;
		local_poff += copied_len;
		target_psize -= copied_len;
		local_psize -= copied_len;

		target_size -= copied_len;

		/* partial copy: retry the remainder of this chunk */
		if (copy_size != copied_len)
			continue;

		/* target page consumed: advance to the next page */
		if (target_psize == 0 && target_size != 0) {
			target_pa += pg_size;
			target_poff = 0;
			target_psize = min(pg_size, target_size);
		}

		/* local page consumed: advance to the next local page */
		if (local_psize == 0 && target_size != 0) {
			local_valign += pg_size;
			local_ra = va_to_pa((void *)local_valign);
			local_poff = 0;
			local_psize = min(pg_size, len);
			len -= local_psize;
		}

		/* all requested bytes copied */
		if (target_size == 0)
			break;
	}

	mutex_exit(&ldcp->lock);

	D1(ldcp->id, "ldc_mem_rdwr_cookie: (0x%llx) done copying sz=0x%llx\n",
	    ldcp->id, *size);

	return (0);
}
/*
 * Public entry point for mapping an exported segment into the local
 * address space.  If shared memory support is globally disabled, the
 * map type is forced down to a shadow mapping before handing off to
 * the internal implementation.
 */
int
ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie, uint32_t ccount,
    uint8_t mtype, uint8_t perm, caddr_t *vaddr, caddr_t *raddr)
{
	uint8_t map_mtype;

	map_mtype = (ldc_shmem_enabled == 0) ? LDC_SHADOW_MAP : mtype;

	return (i_ldc_mem_map(mhandle, cookie, ccount, map_mtype, perm,
	    vaddr, raddr));
}
/*
 * Internal implementation of ldc_mem_map(): map an exported segment,
 * described by one or more cookies, into the local address space.
 * For LDC_DIRECT_MAP, each cookie page is mapped in through the
 * hypervisor and loaded as a locked TTE over a reserved heap_arena VA
 * range; on mapin failure the work is unwound and the code falls back
 * to LDC_SHADOW_MAP, which uses a local (allocated or caller-supplied)
 * shadow buffer synced explicitly via acquire/release.
 */
static int
i_ldc_mem_map(ldc_mem_handle_t mhandle, ldc_mem_cookie_t *cookie,
    uint32_t ccount, uint8_t mtype, uint8_t perm, caddr_t *vaddr,
    caddr_t *raddr)
{
	int i, j, idx, rv, retries;
	ldc_chan_t *ldcp;
	ldc_mhdl_t *mhdl;
	ldc_memseg_t *memseg;
	caddr_t tmpaddr;
	uint64_t map_perm = perm;
	uint64_t pg_size, pg_shift, pg_size_code, pg_mask;
	uint64_t exp_size = 0, base_off, map_size, npages;
	uint64_t cookie_addr, cookie_off, cookie_size;
	tte_t ldc_tte;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_map: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	/* A handle that is bound or already mapped cannot be mapped. */
	if (mhdl->status == LDC_BOUND || mhdl->status == LDC_MAPPED ||
	    mhdl->memseg != NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_map: (0x%llx) handle bound/mapped\n", mhandle);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	ldcp = mhdl->ldcp;

	mutex_enter(&ldcp->lock);

	/* channel must be fully UP to import memory */
	if (ldcp->tstate != TS_UP) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_map: (0x%llx) channel is not UP\n",
		    ldcp->id);
		mutex_exit(&ldcp->lock);
		mutex_exit(&mhdl->lock);
		return (ECONNRESET);
	}

	/* at least one known map type must be requested */
	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP)) == 0) {
		DWARN(ldcp->id, "ldc_mem_map: invalid map type\n");
		mutex_exit(&ldcp->lock);
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	D1(ldcp->id, "ldc_mem_map: (0x%llx) cookie = 0x%llx,0x%llx\n",
	    ldcp->id, cookie->addr, cookie->size);

	/* base page geometry */
	pg_size = MMU_PAGESIZE;
	pg_size_code = page_szc(pg_size);
	pg_shift = page_get_shift(pg_size_code);
	pg_mask = ~(pg_size - 1);

	/* offset of the export within its first page */
	base_off = cookie[0].addr & (pg_size - 1);

	/* total export size and the page-rounded mapping size */
	for (idx = 0; idx < ccount; idx++)
		exp_size += cookie[idx].size;
	map_size = P2ROUNDUP((exp_size + base_off), pg_size);
	npages = (map_size >> pg_shift);

	/* Allocate the memseg and its per-page / per-cookie arrays. */
	memseg = mhdl->memseg =
	    kmem_cache_alloc(ldcssp->memseg_cache, KM_SLEEP);
	memseg->pages = kmem_zalloc((sizeof (ldc_page_t) * npages), KM_SLEEP);
	memseg->cookies =
	    kmem_zalloc((sizeof (ldc_mem_cookie_t) * ccount), KM_SLEEP);

	D2(ldcp->id, "ldc_mem_map: (0x%llx) exp_size=0x%llx, map_size=0x%llx,"
	    "pages=0x%llx\n", ldcp->id, exp_size, map_size, npages);

	if (mtype == LDC_DIRECT_MAP) {

		/* Reserve a page-aligned kernel VA range for the import. */
		memseg->vaddr = vmem_xalloc(heap_arena, map_size,
		    pg_size, 0, 0, NULL, NULL, VM_NOSLEEP);
		if (memseg->vaddr == NULL) {
			/* VA reservation failed: free and bail. */
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_map: (0x%lx) memory map failed\n",
			    ldcp->id);
			kmem_free(memseg->cookies,
			    (sizeof (ldc_mem_cookie_t) * ccount));
			kmem_free(memseg->pages,
			    (sizeof (ldc_page_t) * npages));
			kmem_cache_free(ldcssp->memseg_cache, memseg);
			mutex_exit(&ldcp->lock);
			mutex_exit(&mhdl->lock);
			return (ENOMEM);
		}

		/* drop any stale translations over the reserved range */
		hat_unload(kas.a_hat, memseg->vaddr, map_size,
		    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);

		/* map in each page and load a locked TTE for it */
		idx = 0;
		cookie_size = 0;
		tmpaddr = memseg->vaddr;
		for (i = 0; i < npages; i++) {

			/* current cookie exhausted: advance to the next */
			if (cookie_size == 0) {
				ASSERT(idx < ccount);
				cookie_addr = cookie[idx].addr & pg_mask;
				cookie_off = cookie[idx].addr & (pg_size - 1);
				cookie_size =
				    P2ROUNDUP((cookie_off + cookie[idx].size),
				    pg_size);
				idx++;
			}

			D1(ldcp->id, "ldc_mem_map: (0x%llx) mapping "
			    "cookie 0x%llx, bal=0x%llx\n", ldcp->id,
			    cookie_addr, cookie_size);

			/* retry HV mapin on transient errors */
			for (retries = 0; retries < ldc_max_retries;
			    retries++) {
				rv = hv_ldc_mapin(ldcp->id, cookie_addr,
				    &memseg->pages[i].raddr, &map_perm);
				if (rv != H_EWOULDBLOCK && rv != H_ETOOMANY)
					break;
				drv_usecwait(ldc_delay);
			}

			if (rv || memseg->pages[i].raddr == 0) {
				DWARN(ldcp->id,
				    "ldc_mem_map: (0x%llx) hv mapin err %d\n",
				    ldcp->id, rv);

				/*
				 * Mapin failed: unwind everything done
				 * so far, then fall back to shadow map.
				 */
				hat_unload(kas.a_hat, memseg->vaddr, map_size,
				    HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
				for (j = 0; j < i; j++) {
					rv = hv_ldc_unmap(
					    memseg->pages[j].raddr);
					if (rv) {
						DWARN(ldcp->id,
						    "ldc_mem_map: (0x%llx) "
						    "cannot unmap ra=0x%llx\n",
						    ldcp->id,
						    memseg->pages[j].raddr);
					}
				}
				vmem_free(heap_arena, (void *)memseg->vaddr,
				    map_size);
				mtype = LDC_SHADOW_MAP;
				break;
			} else {
				D1(ldcp->id,
				    "ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
				    "0x%llx, cookie=0x%llx, perm=0x%llx\n",
				    ldcp->id, tmpaddr, memseg->pages[i].raddr,
				    cookie_addr, perm);

				/*
				 * NOTE(review): the TTE is loaded R/W
				 * regardless of the requested perm (perm
				 * is still recorded in the handle below)
				 * — TODO confirm this is intentional.
				 */
				sfmmu_memtte(&ldc_tte,
				    (pfn_t)(memseg->pages[i].raddr >> pg_shift),
				    PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
				D1(ldcp->id,
				    "ldc_mem_map: (0x%llx) ra 0x%llx -> "
				    "tte 0x%llx\n", ldcp->id,
				    memseg->pages[i].raddr, ldc_tte);
				sfmmu_tteload(kas.a_hat, &ldc_tte, tmpaddr,
				    NULL, HAT_LOAD_LOCK);

				cookie_size -= pg_size;
				cookie_addr += pg_size;
				tmpaddr += pg_size;
			}
		}
	}

	if (mtype == LDC_SHADOW_MAP) {
		if (*vaddr == NULL) {
			/* allocate a local shadow buffer for the import */
			memseg->vaddr = kmem_zalloc(exp_size, KM_SLEEP);
			mhdl->myshadow = B_TRUE;

			D1(ldcp->id, "ldc_mem_map: (0x%llx) allocated "
			    "shadow page va=0x%llx\n", ldcp->id, memseg->vaddr);
		} else {
			/* caller supplied the shadow buffer */
			memseg->vaddr = *vaddr;
		}

		/* record the RA of each shadow page */
		for (i = 0, tmpaddr = memseg->vaddr; i < npages; i++) {
			memseg->pages[i].raddr = va_to_pa(tmpaddr);
			tmpaddr += pg_size;
		}
	}

	/* keep a private copy of the cookie list */
	bcopy(cookie, memseg->cookies, ccount * sizeof (ldc_mem_cookie_t));

	/* fill in the remaining memseg fields */
	memseg->raddr = memseg->pages[0].raddr;
	memseg->size = (mtype == LDC_SHADOW_MAP) ? exp_size : map_size;
	memseg->npages = npages;
	memseg->ncookies = ccount;
	memseg->next_cookie = 0;

	/* the handle is now mapped */
	mhdl->mtype = mtype;
	mhdl->perm = perm;
	mhdl->status = LDC_MAPPED;

	D1(ldcp->id, "ldc_mem_map: (0x%llx) mapped 0x%llx, ra=0x%llx, "
	    "va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
	    ldcp->id, mhdl, memseg->raddr, memseg->vaddr,
	    memseg->npages, memseg->ncookies);

	/* shadow buffers start at the data itself, so no base offset */
	if (mtype == LDC_SHADOW_MAP)
		base_off = 0;
	if (raddr)
		*raddr = (caddr_t)(memseg->raddr | base_off);
	if (vaddr)
		*vaddr = (caddr_t)((uintptr_t)memseg->vaddr | base_off);

	mutex_exit(&ldcp->lock);
	mutex_exit(&mhdl->lock);
	return (0);
}
/*
 * Unmap a memory segment previously mapped with ldc_mem_map().  Frees
 * a privately-allocated shadow buffer, or — for a direct map — unloads
 * the locked TTEs, unmaps each page from the hypervisor and releases
 * the reserved VA range.  The memseg and its arrays are then freed and
 * the handle returns to LDC_UNBOUND.
 */
int
ldc_mem_unmap(ldc_mem_handle_t mhandle)
{
	int i, rv;
	ldc_mhdl_t *mhdl = (ldc_mhdl_t *)mhandle;
	ldc_chan_t *ldcp;
	ldc_memseg_t *memseg;

	if (mhdl == 0 || mhdl->status != LDC_MAPPED) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_unmap: (0x%llx) handle is not mapped\n",
		    mhandle);
		return (EINVAL);
	}

	mutex_enter(&mhdl->lock);

	ldcp = mhdl->ldcp;
	memseg = mhdl->memseg;

	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapping handle 0x%llx\n",
	    ldcp->id, mhdl);

	/* if we allocated the shadow buffer ourselves, free it */
	if (mhdl->mtype == LDC_SHADOW_MAP && mhdl->myshadow) {
		kmem_free(memseg->vaddr, memseg->size);
	} else if (mhdl->mtype == LDC_DIRECT_MAP) {

		/* unload the locked TTEs covering the mapping */
		hat_unload(kas.a_hat, memseg->vaddr, memseg->size,
		    HAT_UNLOAD_UNLOCK);

		/* unmap each page from the hypervisor */
		for (i = 0; i < memseg->npages; i++) {
			rv = hv_ldc_unmap(memseg->pages[i].raddr);
			if (rv) {
				/*
				 * BUGFIX: this message previously said
				 * "ldc_mem_map"; corrected to name this
				 * function.
				 */
				DWARN(DBG_ALL_LDCS,
				    "ldc_mem_unmap: (0x%lx) hv unmap err %d\n",
				    ldcp->id, rv);
			}
		}

		/* release the reserved kernel VA range */
		vmem_free(heap_arena, (void *)memseg->vaddr, memseg->size);
	}

	/* free the page/cookie arrays and the memseg itself */
	kmem_free(memseg->pages, (sizeof (ldc_page_t) * memseg->npages));
	kmem_free(memseg->cookies,
	    (sizeof (ldc_mem_cookie_t) * memseg->ncookies));
	kmem_cache_free(ldcssp->memseg_cache, memseg);

	mhdl->memseg = NULL;
	mhdl->status = LDC_UNBOUND;

	D1(ldcp->id, "ldc_mem_unmap: (0x%llx) unmapped handle 0x%llx\n",
	    ldcp->id, mhdl);

	mutex_exit(&mhdl->lock);
	return (0);
}
/*
 * Internal implementation of ldc_mem_acquire()/ldc_mem_release().  For
 * a shadow mapping, syncs the given [offset, offset+size) range of the
 * mapped segment between the local shadow buffer and the exporter via
 * ldc_mem_copy(); direct mappings are coherent and return immediately.
 * A sync in a direction the mapping's permissions do not allow is
 * silently skipped (returns 0).
 *
 * BUGFIX: the ldc_mem_copy() call previously passed the mangled token
 * `©_size' (a corrupted `&copy_size'), which does not compile;
 * restored the address-of expression.
 */
static int
i_ldc_mem_acquire_release(ldc_mem_handle_t mhandle, uint8_t direction,
    uint64_t offset, size_t size)
{
	int err;
	ldc_mhdl_t *mhdl;
	ldc_chan_t *ldcp;
	ldc_memseg_t *memseg;
	caddr_t local_vaddr;
	size_t copy_size;

	if (mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_mem_acquire_release: invalid memory handle\n");
		return (EINVAL);
	}
	mhdl = (ldc_mhdl_t *)mhandle;

	mutex_enter(&mhdl->lock);

	/* only a mapped handle with a channel can be synced */
	if (mhdl->status != LDC_MAPPED || mhdl->ldcp == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_mem_acquire_release: not mapped memory\n");
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	/* direct maps are coherent; nothing to do */
	if (mhdl->mtype == LDC_DIRECT_MAP) {
		mutex_exit(&mhdl->lock);
		return (0);
	}

	/* skip syncs in directions the permissions do not allow */
	if ((direction == LDC_COPY_IN && (mhdl->perm & LDC_MEM_R) == 0) ||
	    (direction == LDC_COPY_OUT && (mhdl->perm & LDC_MEM_W) == 0)) {
		mutex_exit(&mhdl->lock);
		return (0);
	}

	/* the requested range must lie within the mapped segment */
	if (offset >= mhdl->memseg->size ||
	    (offset + size) > mhdl->memseg->size) {
		DWARN(DBG_ALL_LDCS,
		    "i_ldc_mem_acquire_release: memory out of range\n");
		mutex_exit(&mhdl->lock);
		return (EINVAL);
	}

	/* get the channel handle and memory segment */
	ldcp = mhdl->ldcp;
	memseg = mhdl->memseg;

	if (mhdl->mtype == LDC_SHADOW_MAP) {

		local_vaddr = memseg->vaddr + offset;
		copy_size = size;

		/* copy to/from remote from/to local memory */
		err = ldc_mem_copy((ldc_handle_t)ldcp, local_vaddr, offset,
		    &copy_size, memseg->cookies, memseg->ncookies,
		    direction);
		if (err || copy_size != size) {
			DWARN(ldcp->id,
			    "i_ldc_mem_acquire_release: copy failed\n");
			mutex_exit(&mhdl->lock);
			return (err);
		}
	}

	mutex_exit(&mhdl->lock);

	return (0);
}
/*
 * Sync (copy in) a range of an imported segment before reading it —
 * thin wrapper over i_ldc_mem_acquire_release().
 */
int
ldc_mem_acquire(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
{
	int rv;

	rv = i_ldc_mem_acquire_release(mhandle, LDC_COPY_IN, offset, size);
	return (rv);
}
/*
 * Sync (copy out) a range of an imported segment after writing it —
 * thin wrapper over i_ldc_mem_acquire_release().
 */
int
ldc_mem_release(ldc_mem_handle_t mhandle, uint64_t offset, uint64_t size)
{
	int rv;

	rv = i_ldc_mem_acquire_release(mhandle, LDC_COPY_OUT, offset, size);
	return (rv);
}
/*
 * Create a descriptor ring of `len' descriptors of `dsize' bytes each.
 * The backing memory is rounded up to a whole number of pages, zeroed,
 * and the ring is linked onto the driver's global ring list.  The
 * descriptor size must be a nonzero multiple of 8 bytes.
 */
int
ldc_mem_dring_create(uint32_t len, uint32_t dsize, ldc_dring_handle_t *dhandle)
{
	ldc_dring_t *dringp;

	/*
	 * BUGFIX: widen before multiplying — `dsize * len' was a 32-bit
	 * multiplication whose result could silently wrap before being
	 * assigned to size_t for large rings.
	 */
	size_t size = ((size_t)dsize * len);

	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: len=0x%x, size=0x%x\n",
	    len, dsize);

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid dhandle\n");
		return (EINVAL);
	}

	if (len == 0) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid length\n");
		return (EINVAL);
	}

	/* descriptor size must be a nonzero multiple of 8 bytes */
	if (dsize == 0 || (dsize & 0x7)) {
		DWARN(DBG_ALL_LDCS, "ldc_mem_dring_create: invalid size\n");
		return (EINVAL);
	}

	*dhandle = 0;

	/* Allocate a descriptor ring structure */
	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);

	/* Initialize dring */
	dringp->length = len;
	dringp->dsize = dsize;

	/* round the backing size up to a whole number of pages */
	dringp->size = (size & MMU_PAGEMASK);
	if (size & MMU_PAGEOFFSET)
		dringp->size += MMU_PAGESIZE;

	dringp->status = LDC_UNBOUND;

	/* allocate descriptor ring memory */
	dringp->base = kmem_zalloc(dringp->size, KM_SLEEP);

	/* initialize the desc ring lock */
	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);

	/* Add the ring to the head of the driver's global list */
	mutex_enter(&ldcssp->lock);
	dringp->next = ldcssp->dring_list;
	ldcssp->dring_list = dringp;
	mutex_exit(&ldcssp->lock);

	*dhandle = (ldc_dring_handle_t)dringp;

	D1(DBG_ALL_LDCS, "ldc_mem_dring_create: dring allocated\n");

	return (0);
}
/*
 * Destroy a descriptor ring created by ldc_mem_dring_create().  The
 * ring must not be bound to a channel (EACCES otherwise).  Unlinks the
 * ring from the driver's global list, frees its backing memory and the
 * ring structure.  Returns EINVAL if the handle is NULL or the ring is
 * not on the global list.
 */
int
ldc_mem_dring_destroy(ldc_dring_handle_t dhandle)
{
	ldc_dring_t *dringp;
	ldc_dring_t *tmp_dringp;

	D1(DBG_ALL_LDCS, "ldc_mem_dring_destroy: entered\n");

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_destroy: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	/* a bound ring must be unbound first */
	if (dringp->status == LDC_BOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_destroy: desc ring is bound\n");
		return (EACCES);
	}

	mutex_enter(&dringp->lock);
	mutex_enter(&ldcssp->lock);

	/* Unlink the ring from the driver's global list. */
	tmp_dringp = ldcssp->dring_list;
	if (tmp_dringp == dringp) {
		/* ring is at the head of the list */
		ldcssp->dring_list = dringp->next;
		dringp->next = NULL;
	} else {
		/* walk the list for the ring's predecessor */
		while (tmp_dringp != NULL) {
			if (tmp_dringp->next == dringp) {
				tmp_dringp->next = dringp->next;
				dringp->next = NULL;
				break;
			}
			tmp_dringp = tmp_dringp->next;
		}
		/* tmp_dringp is NULL only if the ring was never listed */
		if (tmp_dringp == NULL) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_destroy: invalid descriptor\n");
			mutex_exit(&ldcssp->lock);
			mutex_exit(&dringp->lock);
			return (EINVAL);
		}
	}

	mutex_exit(&ldcssp->lock);

	/* free the ring's backing memory */
	kmem_free(dringp->base, dringp->size);

	mutex_exit(&dringp->lock);

	/* destroy the ring lock and free the ring structure */
	mutex_destroy(&dringp->lock);

	kmem_free(dringp, sizeof (ldc_dring_t));

	return (0);
}
/*
 * Bind a previously created descriptor ring to a channel and export
 * it: allocate a memory handle for the ring, bind the ring's backing
 * pages, and return the export cookie to the caller.
 *
 * Returns 0 on success; EINVAL for bad arguments or an already-bound
 * ring; EAGAIN if the ring does not fit in a single export cookie;
 * otherwise the error from handle allocation or binding.
 */
int
ldc_mem_dring_bind(ldc_handle_t handle, ldc_dring_handle_t dhandle,
    uint8_t mtype, uint8_t perm, ldc_mem_cookie_t *cookie, uint32_t *ccount)
{
	int err;
	ldc_chan_t *ldcp;
	ldc_dring_t *dringp;
	ldc_mem_handle_t mhandle;

	if (handle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_bind: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_bind: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	if (cookie == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_bind: invalid cookie arg\n");
		return (EINVAL);
	}

	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
		DWARN(ldcp->id, "ldc_mem_dring_bind: invalid map type\n");
		return (EINVAL);
	}

	/*
	 * Fall back to shadow mapping when direct shared-memory
	 * descriptor rings are unsupported by the HV or disabled.
	 */
	if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
		mtype = LDC_SHADOW_MAP;
	}

	mutex_enter(&dringp->lock);

	if (dringp->status == LDC_BOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
		    ldcp->id);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	if ((perm & LDC_MEM_RW) == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_bind: invalid permissions\n");
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	/*
	 * NOTE: a second mtype check against
	 * (LDC_SHADOW_MAP|LDC_DIRECT_MAP|LDC_IO_MAP) used to live here;
	 * it is a strict superset of the check above and could never
	 * fail, so it has been removed as dead code.
	 */

	dringp->ldcp = ldcp;

	/* create a memory handle for the ring */
	err = ldc_mem_alloc_handle(handle, &mhandle);
	if (err || mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
		    ldcp->id);
		dringp->ldcp = NULL;
		mutex_exit(&dringp->lock);
		return (err);
	}
	dringp->mhdl = mhandle;

	/* bind the descriptor ring to the channel */
	err = i_ldc_mem_bind_handle(mhandle, dringp->base, dringp->size,
	    mtype, perm, cookie, ccount);
	if (err) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
		    ldcp->id);
		/*
		 * Release the handle allocated above and reset the ring
		 * state (this handle was previously leaked on this path).
		 */
		(void) ldc_mem_free_handle(mhandle);
		dringp->ldcp = NULL;
		dringp->mhdl = 0;
		mutex_exit(&dringp->lock);
		return (err);
	}

	/*
	 * The ring must currently be describable by a single cookie;
	 * back out the bind and let the caller retry otherwise.
	 */
	if (*ccount > 1) {
		(void) ldc_mem_unbind_handle(mhandle);
		(void) ldc_mem_free_handle(mhandle);

		dringp->ldcp = NULL;
		dringp->mhdl = 0;
		*ccount = 0;

		mutex_exit(&dringp->lock);
		return (EAGAIN);
	}

	/* insert the ring into the channel's exported-ring list */
	mutex_enter(&ldcp->exp_dlist_lock);
	dringp->ch_next = ldcp->exp_dring_list;
	ldcp->exp_dring_list = dringp;
	mutex_exit(&ldcp->exp_dlist_lock);

	dringp->status = LDC_BOUND;

	mutex_exit(&dringp->lock);

	return (0);
}
/*
 * Return the next export cookie of a bound descriptor ring by
 * forwarding to ldc_mem_nextcookie() on the ring's memory handle.
 * Returns 0 on success or EINVAL for a bad handle, an unbound ring,
 * or a NULL cookie argument.
 */
int
ldc_mem_dring_nextcookie(ldc_dring_handle_t dhandle, ldc_mem_cookie_t *cookie)
{
	ldc_dring_t *dringp;
	ldc_chan_t *ldcp;
	int rv;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_nextcookie: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	/* only a bound ring has cookies to hand out */
	if (dringp->status != LDC_BOUND) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
		    "is not bound\n", dringp);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	ldcp = dringp->ldcp;

	if (cookie == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
		    ldcp->id);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	/* delegate to the underlying memory handle */
	rv = ldc_mem_nextcookie((ldc_mem_handle_t)dringp->mhdl, cookie);

	mutex_exit(&dringp->lock);

	return (rv);
}
/*
 * Unbind a descriptor ring from its channel: unlink it from the
 * channel's exported-ring list, unbind and free the memory handle,
 * and mark the ring LDC_UNBOUND again.  The ring itself survives and
 * may be re-bound or destroyed.
 *
 * Returns 0 on success, EINVAL for a bad handle, an already-unbound
 * ring, or a ring not found on the channel's list.
 */
int
ldc_mem_dring_unbind(ldc_dring_handle_t dhandle)
{
	ldc_dring_t *dringp;
	ldc_dring_t *tmp_dringp;
	ldc_chan_t *ldcp;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unbind: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	if (dringp->status == LDC_UNBOUND) {
		/* fixed: message previously named ldc_mem_dring_bind */
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unbind: descriptor ring 0x%llx is unbound\n",
		    dringp);
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}
	ldcp = dringp->ldcp;

	mutex_enter(&ldcp->exp_dlist_lock);

	/* remove the ring from the channel's exported-ring list */
	tmp_dringp = ldcp->exp_dring_list;
	if (tmp_dringp == dringp) {
		ldcp->exp_dring_list = dringp->ch_next;
		dringp->ch_next = NULL;
	} else {
		while (tmp_dringp != NULL) {
			if (tmp_dringp->ch_next == dringp) {
				tmp_dringp->ch_next = dringp->ch_next;
				dringp->ch_next = NULL;
				break;
			}
			tmp_dringp = tmp_dringp->ch_next;
		}
		if (tmp_dringp == NULL) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_unbind: invalid descriptor\n");
			mutex_exit(&ldcp->exp_dlist_lock);
			mutex_exit(&dringp->lock);
			return (EINVAL);
		}
	}

	mutex_exit(&ldcp->exp_dlist_lock);

	/* undo the export and release the memory handle */
	(void) ldc_mem_unbind_handle((ldc_mem_handle_t)dringp->mhdl);
	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);

	dringp->ldcp = NULL;
	dringp->mhdl = 0;
	dringp->status = LDC_UNBOUND;

	mutex_exit(&dringp->lock);

	return (0);
}
#ifdef DEBUG
/*
 * Error-injection support (DEBUG only): forcibly clear the export
 * mappings of every descriptor ring exported over channel 'ldcp'.
 *
 * For each page of each exported ring this zeroes the page's map
 * table entry, asks the hypervisor to revoke the corresponding
 * export cookie (retrying up to ldc_max_retries times while the HV
 * returns H_EWOULDBLOCK), and returns the entry to the map table's
 * free count.
 */
void
i_ldc_mem_inject_dring_clear(ldc_chan_t *ldcp)
{
ldc_dring_t *dp;
ldc_mhdl_t *mhdl;
ldc_mtbl_t *mtbl;
ldc_memseg_t *memseg;
uint64_t cookie_addr;
uint64_t pg_shift, pg_size_code;
int i, rv, retries;
/* nothing to do if the channel has no map table */
if ((mtbl = ldcp->mtbl) == NULL)
return;
/* hold the map table and export list locks across the whole sweep */
mutex_enter(&mtbl->lock);
mutex_enter(&ldcp->exp_dlist_lock);
for (dp = ldcp->exp_dring_list; dp != NULL; dp = dp->ch_next) {
/* skip rings with no memory handle or no bound memory segment */
if ((mhdl = (ldc_mhdl_t *)dp->mhdl) == NULL)
continue;
if ((memseg = mhdl->memseg) == NULL)
continue;
for (i = 0; i < memseg->npages; i++) {
/* invalidate this page's map table entry */
memseg->pages[i].mte->entry.ll = 0;
/* reconstruct the cookie address from the table index */
pg_size_code = page_szc(MMU_PAGESIZE);
pg_shift = page_get_shift(pg_size_code);
cookie_addr = IDX2COOKIE(memseg->pages[i].index,
pg_size_code, pg_shift);
/* revoke the cookie; back off while the HV would block */
retries = 0;
do {
rv = hv_ldc_revoke(ldcp->id, cookie_addr,
memseg->pages[i].mte->cookie);
if (rv != H_EWOULDBLOCK)
break;
drv_usecwait(ldc_delay);
} while (retries++ < ldc_max_retries);
/* warn but keep going: remaining pages still get cleared */
if (rv != 0) {
DWARN(ldcp->id,
"i_ldc_mem_inject_dring_clear(): "
"hv_ldc_revoke failed: "
"channel: 0x%lx, cookie addr: 0x%p,"
"cookie: 0x%lx, rv: %d",
ldcp->id, cookie_addr,
memseg->pages[i].mte->cookie, rv);
}
/* the table entry is available again */
mtbl->num_avail++;
}
}
mutex_exit(&ldcp->exp_dlist_lock);
mutex_exit(&mtbl->lock);
}
#endif
/*
 * Report memory information for a descriptor ring.  Rings with a
 * memory handle defer to ldc_mem_info(); otherwise only the kernel
 * virtual address and current status are filled in (raddr is 0).
 * Returns 0 on success, EINVAL for a bad handle, or the error from
 * ldc_mem_info().
 */
int
ldc_mem_dring_info(ldc_dring_handle_t dhandle, ldc_mem_info_t *minfo)
{
	ldc_dring_t *dringp;
	int rv = 0;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_info: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	if (dringp->mhdl != 0) {
		/* bound/mapped ring: the memory handle has the details */
		rv = ldc_mem_info(dringp->mhdl, minfo);
		if (rv != 0) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_info: error reading mem info\n");
		}
	} else {
		/* unbound ring: report what the ring itself knows */
		minfo->vaddr = dringp->base;
		minfo->raddr = 0;
		minfo->status = dringp->status;
	}

	mutex_exit(&dringp->lock);

	return (rv);
}
/*
 * Map an exported descriptor ring into the importer's address space.
 * 'cookie'/'ccount' describe the exported ring, 'len' is the number
 * of descriptors and 'dsize' the size of each descriptor; 'mtype'
 * selects the mapping type (forced to LDC_SHADOW_MAP when direct
 * shared-memory rings are unsupported or disabled).
 *
 * Returns 0 and a ring handle in '*dhandle' on success; EINVAL for
 * bad arguments; ENOMEM if the handle cannot be allocated or the
 * ring cannot be mapped.
 */
int
ldc_mem_dring_map(ldc_handle_t handle, ldc_mem_cookie_t *cookie,
    uint32_t ccount, uint32_t len, uint32_t dsize, uint8_t mtype,
    ldc_dring_handle_t *dhandle)
{
	int err;
	ldc_chan_t *ldcp;
	ldc_mem_handle_t mhandle;
	ldc_dring_t *dringp;
	size_t dring_size;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: invalid dhandle\n");
		return (EINVAL);
	}

	if (handle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: invalid channel handle\n");
		return (EINVAL);
	}
	ldcp = (ldc_chan_t *)handle;

	if (cookie == NULL) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_map: (0x%llx) invalid cookie\n",
		    ldcp->id);
		return (EINVAL);
	}

	/* only a single cookie per ring is supported */
	ASSERT(ccount == 1);

	/*
	 * Validate the ring geometry against the cookie.  Do the
	 * multiplication in 64 bits: 'dsize * len' is a 32-bit product
	 * and could silently wrap for large rings, which would let an
	 * undersized cookie pass this check.
	 */
	if (cookie->size < (uint64_t)dsize * len) {
		DWARN(ldcp->id,
		    "ldc_mem_dring_map: (0x%llx) invalid dsize/len\n",
		    ldcp->id);
		return (EINVAL);
	}

	if ((mtype & (LDC_SHADOW_MAP|LDC_DIRECT_MAP)) == 0) {
		DWARN(ldcp->id, "ldc_mem_dring_map: invalid map type\n");
		return (EINVAL);
	}

	/* fall back to shadow mapping if direct mapping is unavailable */
	if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
		mtype = LDC_SHADOW_MAP;
	}

	*dhandle = 0;

	/* allocate an dring structure */
	dringp = kmem_zalloc(sizeof (ldc_dring_t), KM_SLEEP);

	D1(ldcp->id,
	    "ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
	    mtype, len, dsize, cookie->addr, cookie->size);

	/* initialize dring state */
	dringp->length = len;
	dringp->dsize = dsize;

	/* round the ring size up to a page multiple (64-bit math) */
	dring_size = (size_t)len * dsize;
	dringp->size = (dring_size & MMU_PAGEMASK);
	if (dring_size & MMU_PAGEOFFSET)
		dringp->size += MMU_PAGESIZE;

	dringp->ldcp = ldcp;

	/* allocate a memory handle */
	err = ldc_mem_alloc_handle(handle, &mhandle);
	if (err || mhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: cannot alloc hdl err=%d\n",
		    err);
		kmem_free(dringp, sizeof (ldc_dring_t));
		/* keep the historical ENOMEM contract on this path */
		return (ENOMEM);
	}

	dringp->mhdl = mhandle;
	dringp->base = NULL;

	/* map the dring into the local address space */
	err = i_ldc_mem_map(mhandle, cookie, ccount, mtype, LDC_MEM_RW,
	    &(dringp->base), NULL);
	if (err || dringp->base == NULL) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
		(void) ldc_mem_free_handle(mhandle);
		kmem_free(dringp, sizeof (ldc_dring_t));
		return (ENOMEM);
	}

	/* initialize the lock */
	mutex_init(&dringp->lock, NULL, MUTEX_DRIVER, NULL);

	/* add the dring to the channel's imported-ring list */
	mutex_enter(&ldcp->imp_dlist_lock);
	dringp->ch_next = ldcp->imp_dring_list;
	ldcp->imp_dring_list = dringp;
	mutex_exit(&ldcp->imp_dlist_lock);

	dringp->status = LDC_MAPPED;

	*dhandle = (ldc_dring_handle_t)dringp;

	return (0);
}
/*
 * Unmap a descriptor ring mapped with ldc_mem_dring_map(): unlink it
 * from the channel's imported-ring list, unmap the memory, free the
 * memory handle, and free the ring descriptor itself.  The handle is
 * invalid after a successful return.
 *
 * Returns 0 on success, EINVAL for a bad handle, a ring that is not
 * mapped, or a ring not found on the channel's list.
 */
int
ldc_mem_dring_unmap(ldc_dring_handle_t dhandle)
{
	ldc_dring_t *dringp;
	ldc_dring_t *tmp_dringp;
	ldc_chan_t *ldcp;

	if (dhandle == 0) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unmap: invalid desc ring handle\n");
		return (EINVAL);
	}
	dringp = (ldc_dring_t *)dhandle;

	mutex_enter(&dringp->lock);

	/*
	 * Check the mapped state under dringp->lock so two concurrent
	 * unmaps cannot both pass the check and double-free the ring.
	 * (Previously this check was done before taking the lock.)
	 */
	if (dringp->status != LDC_MAPPED) {
		DWARN(DBG_ALL_LDCS,
		    "ldc_mem_dring_unmap: not a mapped desc ring\n");
		mutex_exit(&dringp->lock);
		return (EINVAL);
	}

	ldcp = dringp->ldcp;

	mutex_enter(&ldcp->imp_dlist_lock);

	/* find and unlink the ring from the channel's imported list */
	tmp_dringp = ldcp->imp_dring_list;
	if (tmp_dringp == dringp) {
		ldcp->imp_dring_list = dringp->ch_next;
		dringp->ch_next = NULL;
	} else {
		while (tmp_dringp != NULL) {
			if (tmp_dringp->ch_next == dringp) {
				tmp_dringp->ch_next = dringp->ch_next;
				dringp->ch_next = NULL;
				break;
			}
			tmp_dringp = tmp_dringp->ch_next;
		}
		if (tmp_dringp == NULL) {
			DWARN(DBG_ALL_LDCS,
			    "ldc_mem_dring_unmap: invalid descriptor\n");
			mutex_exit(&ldcp->imp_dlist_lock);
			mutex_exit(&dringp->lock);
			return (EINVAL);
		}
	}

	mutex_exit(&ldcp->imp_dlist_lock);

	/* undo the mapping and release the memory handle */
	(void) ldc_mem_unmap(dringp->mhdl);
	(void) ldc_mem_free_handle((ldc_mem_handle_t)dringp->mhdl);

	dringp->status = 0;
	dringp->ldcp = NULL;

	mutex_exit(&dringp->lock);
	mutex_destroy(&dringp->lock);
	kmem_free(dringp, sizeof (ldc_dring_t));

	return (0);
}
/*
 * Common implementation for ldc_mem_dring_acquire()/release():
 * synchronize the descriptor range [start, end] of a mapped
 * (imported) descriptor ring with the exporter by delegating to
 * i_ldc_mem_acquire_release().  'direction' (LDC_COPY_IN or
 * LDC_COPY_OUT from the two callers) is passed straight through.
 *
 * The range may wrap past the end of the ring (start > end); in that
 * case the copy is done as two contiguous chunks.  Rings whose
 * memory handle is not LDC_SHADOW_MAP are rejected with EINVAL.
 *
 * Returns 0 on success, EINVAL for bad handle/state/range, or the
 * error from i_ldc_mem_acquire_release().
 */
static int
i_ldc_dring_acquire_release(ldc_dring_handle_t dhandle,
uint8_t direction, uint64_t start, uint64_t end)
{
int err;
ldc_dring_t *dringp;
ldc_chan_t *ldcp;
ldc_mhdl_t *mhdl;
uint64_t soff;
size_t copy_size;
if (dhandle == 0) {
DWARN(DBG_ALL_LDCS,
"i_ldc_dring_acquire_release: invalid desc ring handle\n");
return (EINVAL);
}
dringp = (ldc_dring_t *)dhandle;
mutex_enter(&dringp->lock);
/* only an imported (mapped) ring with a live channel qualifies */
if (dringp->status != LDC_MAPPED || dringp->ldcp == NULL) {
DWARN(DBG_ALL_LDCS,
"i_ldc_dring_acquire_release: not a mapped desc ring\n");
mutex_exit(&dringp->lock);
return (EINVAL);
}
/* both indices must name valid descriptors */
if (start >= dringp->length || end >= dringp->length) {
DWARN(DBG_ALL_LDCS,
"i_ldc_dring_acquire_release: index out of range\n");
mutex_exit(&dringp->lock);
return (EINVAL);
}
mhdl = (ldc_mhdl_t *)dringp->mhdl;
if (mhdl == NULL) {
DWARN(DBG_ALL_LDCS,
"i_ldc_dring_acquire_release: invalid memory handle\n");
mutex_exit(&dringp->lock);
return (EINVAL);
}
/* acquire/release only applies to shadow-mapped rings */
if (mhdl->mtype != LDC_SHADOW_MAP) {
DWARN(DBG_ALL_LDCS,
"i_ldc_dring_acquire_release: invalid mtype: %d\n",
mhdl->mtype);
mutex_exit(&dringp->lock);
return (EINVAL);
}
ldcp = dringp->ldcp;
/*
 * First (or only) chunk: [start, end] when the range does not wrap,
 * otherwise [start, length - 1], i.e. up to the end of the ring.
 */
copy_size = (start <= end) ? (((end - start) + 1) * dringp->dsize) :
((dringp->length - start) * dringp->dsize);
/* byte offset of descriptor 'start' within the ring */
soff = (start * dringp->dsize);
D1(ldcp->id, "i_ldc_dring_acquire_release: c1 off=0x%llx sz=0x%llx\n",
soff, copy_size);
err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
direction, soff, copy_size);
if (err) {
DWARN(ldcp->id,
"i_ldc_dring_acquire_release: copy failed\n");
mutex_exit(&dringp->lock);
return (err);
}
/* wrapped range: second chunk is [0, end] from the ring's start */
if (start > end) {
copy_size = ((end + 1) * dringp->dsize);
soff = 0;
D1(ldcp->id, "i_ldc_dring_acquire_release: c2 "
"off=0x%llx sz=0x%llx\n", soff, copy_size);
err = i_ldc_mem_acquire_release((ldc_mem_handle_t)dringp->mhdl,
direction, soff, copy_size);
if (err) {
DWARN(ldcp->id,
"i_ldc_dring_acquire_release: copy failed\n");
mutex_exit(&dringp->lock);
return (err);
}
}
mutex_exit(&dringp->lock);
return (0);
}
/*
 * Acquire descriptors [start, end] of an imported descriptor ring:
 * thin wrapper that invokes i_ldc_dring_acquire_release() with
 * LDC_COPY_IN.
 */
int
ldc_mem_dring_acquire(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
{
	int rv;

	rv = i_ldc_dring_acquire_release(dhandle, LDC_COPY_IN, start, end);
	return (rv);
}
/*
 * Release descriptors [start, end] of an imported descriptor ring:
 * thin wrapper that invokes i_ldc_dring_acquire_release() with
 * LDC_COPY_OUT.
 */
int
ldc_mem_dring_release(ldc_dring_handle_t dhandle, uint64_t start, uint64_t end)
{
	int rv;

	rv = i_ldc_dring_acquire_release(dhandle, LDC_COPY_OUT, start, end);
	return (rv);
}