#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <machine/bus.h>
#include <uvm/uvm_extern.h>
#ifndef FORCE_BOUNCE_BUFFER
#define FORCE_BOUNCE_BUFFER 0
#endif
int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
struct proc *, int, paddr_t *, int *, int *, int);
/*
 * Common function for DMA map creation.  Allocates the dmamap structure
 * with its segment array inlined.  When running as an SEV guest (or when
 * built with FORCE_BOUNCE_BUFFER) it additionally allocates bounce pages
 * and a kernel VA range where they are entered with PMAP_NOCRYPT, so DMA
 * goes through pages the device/hypervisor can access.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct bus_dmamap *map;
	struct pglist mlist;
	struct vm_page **pg, *pgnext;
	size_t mapsize, sz, ssize;
	vaddr_t va, sva;
	void *mapstore;
	int npages, error;
	const struct kmem_dyn_mode *kd;
	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

	/*
	 * The map structure ends in a variable-length segment array;
	 * one bus_dma_segment_t is already part of struct bus_dmamap,
	 * hence the (nsegments - 1).
	 */
	mapsize = sizeof(struct bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if (use_bounce_buffer) {
		/* Enough pages to cover `size', plus one in case of split. */
		npages = round_page(size) / PAGE_SIZE + 1;
		if (npages < nsegments)
			npages = nsegments;
		/* Bounce page pointers are stored right after the segments. */
		mapsize += sizeof(struct vm_page *) * npages;
	}

	mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? (M_NOWAIT|M_ZERO) : (M_WAITOK|M_ZERO));
	if (mapstore == NULL)
		return (ENOMEM);

	map = (struct bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	if (use_bounce_buffer) {
		map->_dm_pages = (void *)&map->dm_segs[nsegments];
		map->_dm_npages = npages;
	}
	/* WAITOK/NOWAIT only apply to creation, not to later operations. */
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);

	if (!use_bounce_buffer) {
		*dmamp = map;
		return (0);
	}

	/* Reserve a kernel VA range for the bounce pages. */
	sz = npages << PGSHIFT;
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(sz, &kv_any, &kp_none, kd);
	if (va == 0) {
		map->_dm_npages = 0;
		free(map, M_DEVBUF, mapsize);
		return (ENOMEM);
	}

	/* Allocate the physical bounce pages. */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(sz, 0, -1, PAGE_SIZE, 0, &mlist, nsegments,
	    (flags & BUS_DMA_NOWAIT) ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK);
	if (error) {
		map->_dm_npages = 0;
		km_free((void *)va, sz, &kv_any, &kp_none);
		free(map, M_DEVBUF, mapsize);
		return (ENOMEM);
	}

	/* Enter each bounce page at its VA and remember it in _dm_pages. */
	sva = va;
	ssize = sz;
	pgnext = TAILQ_FIRST(&mlist);
	for (pg = map->_dm_pages; npages--; va += PAGE_SIZE, pg++) {
		*pg = pgnext;
		/*
		 * PMAP_NOCRYPT maps the page without the SEV encryption
		 * bit — presumably so the host side can read/write it;
		 * see the cpu_sev_guestmode gate above.
		 */
		error = pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(*pg),
		    PROT_READ | PROT_WRITE,
		    PROT_READ | PROT_WRITE | PMAP_WIRED |
		    PMAP_CANFAIL | PMAP_NOCRYPT);
		if (error) {
			/* Unwind: VA range, map storage and page list. */
			pmap_update(pmap_kernel());
			map->_dm_npages = 0;
			km_free((void *)sva, ssize, &kv_any, &kp_none);
			free(map, M_DEVBUF, mapsize);
			uvm_pglistfree(&mlist);
			return (ENOMEM);
		}
		pgnext = TAILQ_NEXT(*pg, pageq);
		bzero((void *)va, PAGE_SIZE);
	}
	pmap_update(pmap_kernel());

	map->_dm_pgva = sva;
	*dmamp = map;
	return (0);
}
/*
 * Common function for DMA map destruction.  Releases the bounce-buffer
 * VA range and pages (if any) and frees the map storage itself.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;
	struct vm_page **pg;
	struct pglist mlist;
	size_t sz;

	/* Drop the kernel VA the bounce pages were mapped at. */
	if (map->_dm_pgva != 0)
		km_free((void *)map->_dm_pgva, map->_dm_npages << PGSHIFT,
		    &kv_any, &kp_none);

	/*
	 * Recompute the allocation size exactly as _bus_dmamap_create()
	 * did, before _dm_npages is consumed below.
	 */
	sz = sizeof(struct bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (map->_dm_segcnt - 1));
	if (use_bounce_buffer)
		sz += sizeof(struct vm_page *) * map->_dm_npages;

	/* Hand the bounce pages back to UVM. */
	if (map->_dm_pages != NULL) {
		TAILQ_INIT(&mlist);
		for (pg = map->_dm_pages; map->_dm_npages--; pg++)
			TAILQ_INSERT_TAIL(&mlist, *pg, pageq);
		uvm_pglistfree(&mlist);
	}

	free(map, M_DEVBUF, sz);
}
/*
 * Common function for loading a DMA map with a linear buffer.  The
 * segment list is built by _bus_dmamap_load_buffer(); on success the
 * map records the mapped size, segment count and bounce pages used.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastpa = 0;
	int nseg = 0, npage = 0, rv;

	/* Invalidate any previous load. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	rv = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastpa, &nseg, &npage, 1);
	if (rv != 0)
		return (rv);

	map->dm_mapsize = buflen;
	map->dm_nsegs = nseg + 1;
	map->_dm_nused = npage;
	return (0);
}
/*
 * Like _bus_dmamap_load(), but for mbuf chains.  Each non-empty mbuf in
 * the chain is appended to the map via _bus_dmamap_load_buffer().
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	paddr_t lastpa = 0;
	struct mbuf *m;
	int nseg = 0, npage = 0, rv = 0, first = 1;

	/* Invalidate any previous load. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	for (m = m0; m != NULL && rv == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;	/* empty mbufs contribute nothing */
		rv = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastpa, &nseg, &npage, first);
		first = 0;
	}

	if (rv == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = nseg + 1;
		map->_dm_nused = npage;
	}
	return (rv);
}
/*
 * Like _bus_dmamap_load(), but for uios.  Userspace uios are resolved
 * through the owning process' pmap; kernel uios use pmap_kernel().
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastpa = 0;
	bus_size_t todo, chunk;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t base;
	int i, nseg, npage, rv, first;

	/* Invalidate any previous load. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	todo = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	nseg = npage = 0;
	rv = 0;
	for (i = 0; i < uio->uio_iovcnt && todo != 0 && rv == 0; i++) {
		/* Never map more than the residual byte count. */
		chunk = todo < iov[i].iov_len ? todo : iov[i].iov_len;
		base = (caddr_t)iov[i].iov_base;
		rv = _bus_dmamap_load_buffer(t, map, base, chunk,
		    p, flags, &lastpa, &nseg, &npage, first);
		first = 0;
		todo -= chunk;
	}

	if (rv == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = nseg + 1;
		map->_dm_nused = npage;
	}
	return (rv);
}
/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().  Walks the physical segments, chopping them into
 * chunks clipped to page, boundary and maxsegsz limits, and coalesces
 * adjacent chunks where possible.  In bounce-buffer mode each chunk is
 * also assigned a slot in the map's bounce area.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	bus_addr_t paddr, baddr, bmask, lastaddr = 0;
	bus_size_t plen, sgsize, mapsize;
	int first = 1;
	int i, seg = 0;
	int page, off;
	vaddr_t pgva, vaddr;
	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

	/*
	 * Make sure on error condition we leave "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	page = 0;
	pgva = -1;
	vaddr = -1;
	mapsize = size;
	bmask = ~(map->_dm_boundary - 1);

	for (i = 0; i < nsegs && size > 0; i++) {
		paddr = segs[i].ds_addr;
		plen = MIN(segs[i].ds_len, size);

		while (plen > 0) {
			if (use_bounce_buffer) {
				/* One bounce page per chunk. */
				if (page >= map->_dm_npages)
					return (EFBIG);
				off = paddr & PAGE_MASK;
				/* Caller data is reachable via the physmap. */
				vaddr = PMAP_DIRECT_MAP(paddr);
				pgva = map->_dm_pgva + (page << PGSHIFT) + off;
				page++;
			}

			/* Clip to the end of the current physical page. */
			sgsize = PAGE_SIZE - ((u_long)paddr & PGOFSET);
			if (plen < sgsize)
				sgsize = plen;

			if (paddr > dma_constraint.ucr_high &&
			    (map->_dm_flags & BUS_DMA_64BIT) == 0)
				panic("Non dma-reachable buffer at "
				    "paddr %#lx(raw)", paddr);

			/*
			 * Make sure we don't cross any boundaries.
			 */
			if (map->_dm_boundary > 0) {
				baddr = (paddr + map->_dm_boundary) & bmask;
				if (sgsize > (baddr - paddr))
					sgsize = (baddr - paddr);
			}

			/*
			 * Insert chunk into a segment, coalescing with
			 * the previous segment if possible.
			 */
			if (first) {
				map->dm_segs[seg].ds_addr = paddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_va = vaddr;
				map->dm_segs[seg]._ds_bounce_va = pgva;
				first = 0;
			} else {
				if (paddr == lastaddr &&
				    (map->dm_segs[seg].ds_len + sgsize) <=
				    map->_dm_maxsegsz &&
				    (map->_dm_boundary == 0 ||
				    (map->dm_segs[seg].ds_addr & bmask) ==
				    (paddr & bmask)) &&
				    (!use_bounce_buffer ||
				    (map->dm_segs[seg]._ds_va +
				    map->dm_segs[seg].ds_len) == vaddr)) {
					/* Contiguous: extend previous chunk. */
					map->dm_segs[seg].ds_len += sgsize;
				} else {
					if (++seg >= map->_dm_segcnt)
						return (EINVAL);
					map->dm_segs[seg].ds_addr = paddr;
					map->dm_segs[seg].ds_len = sgsize;
					map->dm_segs[seg]._ds_va = vaddr;
					map->dm_segs[seg]._ds_bounce_va = pgva;
				}
			}

			paddr += sgsize;
			plen -= sgsize;
			size -= sgsize;
			lastaddr = paddr;
		}
	}

	map->dm_mapsize = mapsize;
	map->dm_nsegs = seg + 1;
	return (0);
}
/*
 * Common function for unloading a DMA map.  Only the loaded state is
 * cleared; the map itself remains allocated for reuse.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;
	map->_dm_nused = 0;
}
/*
 * Common function for DMA map synchronization.  In bounce-buffer mode
 * data is copied between the caller's buffer (_ds_va) and the bounce
 * pages (_ds_bounce_va, entered with PMAP_NOCRYPT in
 * _bus_dmamap_create()): into the bounce buffer before a device read of
 * memory (PREWRITE) and back out after a device write (POSTREAD).
 * Without bounce buffers there is nothing to do here.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size, int op)
{
	bus_dma_segment_t *sg;
	int i, off = addr;	/* byte offset into the mapped region */
	bus_size_t l;
	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

	if (!use_bounce_buffer)
		return;

	for (i = map->_dm_segcnt, sg = map->dm_segs; size && i; i--, sg++) {
		/* Skip segments entirely before the requested offset. */
		if (off >= sg->ds_len) {
			off -= sg->ds_len;
			continue;
		}

		/* Length of the sync window within this segment. */
		l = sg->ds_len - off;
		if (l > size)
			l = size;
		size -= l;

		if (op & BUS_DMASYNC_POSTREAD) {
			bcopy((void *)(sg->_ds_bounce_va + off),
			    (void *)(sg->_ds_va + off), l);
		}

		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)(sg->_ds_va + off),
			    (void *)(sg->_ds_bounce_va + off), l);
		}
		/* Subsequent segments are synced from their start. */
		off = 0;
	}
}
/*
 * Common function for DMA-safe memory allocation.  Picks the physical
 * address constraint from the map flags and defers to
 * _bus_dmamem_alloc_range().
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	const struct uvm_constraint_range *ucr;

	/* 64-bit capable devices can take memory from anywhere. */
	ucr = ISSET(flags, BUS_DMA_64BIT) ? &no_constraint : &dma_constraint;

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, ucr->ucr_low, ucr->ucr_high);
}
/*
 * Common function for freeing DMA-safe memory.  Rebuilds a page list
 * from the segment array and returns the pages to UVM.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist pglist;
	struct vm_page *pg;
	paddr_t pa, end;
	int i;

	TAILQ_INIT(&pglist);
	for (i = 0; i < nsegs; i++) {
		end = segs[i].ds_addr + segs[i].ds_len;
		for (pa = segs[i].ds_addr; pa < end; pa += PAGE_SIZE) {
			pg = PHYS_TO_VM_PAGE(pa);
			TAILQ_INSERT_TAIL(&pglist, pg, pageq);
		}
	}
	uvm_pglistfree(&pglist);
}
/*
 * Common function for mapping DMA-safe memory into kernel VA.  A single
 * segment with default caching is served straight from the physmap;
 * otherwise a fresh VA range is allocated and each page entered
 * individually.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;
	const struct kmem_dyn_mode *kd;

	/* Fast path: one segment, cacheable — use the direct map. */
	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;
	sva = va;
	ssize = size;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			/* Segments must never exceed the VA we reserved. */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			/*
			 * NOTE(review): the pmap flag is OR'ed into the
			 * physical address, which is how this pmap
			 * appears to accept per-mapping attributes.
			 */
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				/* Undo partial work and release the VA. */
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
/*
 * Common function for unmapping DMA-safe memory.  Addresses inside the
 * direct map were never allocated with km_alloc() and are left alone.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	if (kva < (caddr_t)PMAP_DIRECT_BASE || kva > (caddr_t)PMAP_DIRECT_END)
		km_free(kva, round_page(size), &kv_any, &kp_none);
}
/*
 * Common function for mmap(2)'ing DMA-safe memory.  Translates a byte
 * offset into the segment list into a physical address (with the cache
 * attribute folded in), or -1 if the offset is out of range.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	int i, pmapflags = 0;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off < segs[i].ds_len)
			return ((segs[i].ds_addr + off) | pmapflags);
		off -= segs[i].ds_len;
	}

	/* Offset lies beyond the end of the segment list. */
	return (-1);
}
/*
 * Utility function to load a linear buffer.  State is carried across
 * invocations so that chained buffers (mbufs, iovecs) build one map:
 * lastaddrp holds the physical address just past the previous chunk,
 * segp the current segment index, and usedp the number of bounce pages
 * consumed so far.  `first' is set on the first invocation for a map.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int *usedp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t pgva = -1, vaddr = (vaddr_t)buf;
	int seg, page, off;
	pmap_t pmap;
	struct vm_page *pg;
	int use_bounce_buffer = cpu_sev_guestmode || FORCE_BOUNCE_BUFFER;

	/* Userland buffers resolve through the owning process' pmap. */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	page = *usedp;
	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this chunk.
		 */
		pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);

		if (curaddr > dma_constraint.ucr_high &&
		    (map->_dm_flags & BUS_DMA_64BIT) == 0)
			panic("Non dma-reachable buffer at curaddr %#lx(raw)",
			    curaddr);

		if (use_bounce_buffer) {
			/*
			 * Substitute one of the map's bounce pages for
			 * the real page; data is shuttled between the
			 * two by _bus_dmamap_sync().
			 */
			if (page >= map->_dm_npages)
				return (EFBIG);
			off = vaddr & PAGE_MASK;
			pg = map->_dm_pages[page];
			curaddr = VM_PAGE_TO_PHYS(pg) + off;
			pgva = map->_dm_pgva + (page << PGSHIFT) + off;
			page++;
		}

		/*
		 * Compute the segment size, clipped to the page end and
		 * the remaining buffer length.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with the
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_va = vaddr;
			map->dm_segs[seg]._ds_bounce_va = pgva;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)) &&
			    (!use_bounce_buffer || (map->dm_segs[seg]._ds_va +
			    map->dm_segs[seg].ds_len) == vaddr)) {
				/* Physically contiguous: extend. */
				map->dm_segs[seg].ds_len += sgsize;
			} else {
				/* Out of segments: EFBIG reported below. */
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_va = vaddr;
				map->dm_segs[seg]._ds_bounce_va = pgva;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/* Pass state back to the caller for the next invocation. */
	*segp = seg;
	*usedp = page;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);
	return (0);
}
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.  Pages come from
 * uvm_pglistalloc() and are compressed into as few contiguous segments
 * as the returned page list allows.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, bus_addr_t low, bus_addr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	segs[0]._ds_boundary = boundary;
	segs[0]._ds_align = alignment;

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	for (m = TAILQ_NEXT(m, pageq); m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curseg == nsegs) {
			printf("uvm_pglistalloc returned too many\n");
			panic("_bus_dmamem_alloc_range");
		}
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		/* Extend the current segment while pages stay contiguous. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}