#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include "iommu_if.h"
#define MAX_BPAGES MIN(8192, physmem/40)
struct bounce_page;
struct bounce_zone;
/*
 * DMA tag: the constraints a device places on DMA transfers, merged with
 * any parent tag's constraints at creation time, plus the optional IOMMU
 * used to translate segment addresses.
 */
struct bus_dma_tag {
bus_size_t alignment;	/* required alignment of segment start addresses */
bus_addr_t boundary;	/* boundary no segment may cross (0 = none) */
bus_addr_t lowaddr;	/* exclusion window start: paddr > lowaddr bounces */
bus_addr_t highaddr;	/* exclusion window end: paddr <= highaddr bounces */
bus_size_t maxsize;	/* largest mapping supported by this tag */
bus_size_t maxsegsz;	/* largest single segment supported */
u_int nsegments;	/* max segments per mapping (sizes map->segments) */
int flags;	/* BUS_DMA_* flags, incl. BUS_DMA_COULD_BOUNCE */
int map_count;	/* number of live maps created from this tag */
bus_dma_lock_t *lockfunc;	/* lock fn serializing deferred callbacks */
void *lockfuncarg;	/* opaque argument passed to lockfunc */
struct bounce_zone *bounce_zone;	/* bounce-page pool for this tag */
device_t iommu;	/* IOMMU device, or NULL when none is attached */
void *iommu_cookie;	/* opaque per-tag IOMMU state */
};
static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Busdma parameters");
/*
 * Per-mapping state: bounce-page bookkeeping plus the callback data used
 * when a load is deferred waiting for bounce pages.
 */
struct bus_dmamap {
STAILQ_HEAD(, bounce_page) bpages;	/* bounce pages used by this map */
int pagesneeded;	/* bounce pages the current load requires */
int pagesreserved;	/* bounce pages reserved so far */
bus_dma_tag_t dmat;	/* owning tag (recorded for deferred loads) */
struct memdesc mem;	/* memory descriptor for a deferred load */
bus_dma_segment_t *segments;	/* array sized to dmat->nsegments */
int nsegs;	/* valid entries in segments[] */
bus_dmamap_callback_t *callback;	/* deferred-load completion callback */
void *callback_arg;	/* argument passed to callback */
__sbintime_t queued_time;	/* NOTE(review): maintained by the shared
				 * subr_busdma_bounce.c code — confirm */
STAILQ_ENTRY(bus_dmamap) links;	/* presumably the bounce wait-queue link;
				 * managed by the shared bounce code */
int contigalloc;	/* buffer came from kmem_alloc_contig() */
};
static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
#define dmat_alignment(dmat) ((dmat)->alignment)
#define dmat_bounce_flags(dmat) (0)
#define dmat_boundary(dmat) ((dmat)->boundary)
#define dmat_flags(dmat) ((dmat)->flags)
#define dmat_highaddr(dmat) ((dmat)->highaddr)
#define dmat_lowaddr(dmat) ((dmat)->lowaddr)
#define dmat_lockfunc(dmat) ((dmat)->lockfunc)
#define dmat_lockfuncarg(dmat) ((dmat)->lockfuncarg)
#define dmat_maxsegsz(dmat) ((dmat)->maxsegsz)
#define dmat_nsegments(dmat) ((dmat)->nsegments)
#include "../../kern/subr_busdma_bounce.c"
/*
 * Decide whether a physical address violates this tag's constraints and
 * therefore must go through a bounce page.  Misaligned addresses always
 * bounce; addresses inside the (lowaddr, highaddr] exclusion window bounce
 * only when no IOMMU is available to remap them.
 */
static __inline bool
must_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
{

	if (!vm_addr_align_ok(paddr, dmat->alignment))
		return (true);
	return (dmat->iommu == NULL && paddr > dmat->lowaddr &&
	    paddr <= dmat->highaddr);
}
#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device DMA tag, merging the given constraints with those of
 * the parent tag (if any).  Filter functions are no longer supported and
 * are rejected with EINVAL.  Returns 0 and stores the new tag in *dmat on
 * success, or an errno value with *dmat left NULL.
 *
 * Fix: the trailing CTR4 trace used to read newtag->flags after the tag
 * had been freed on the error path (use-after-free read); the pointer is
 * now cleared before the trace.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* A segment may never cross a boundary, so clamp maxsegsz to it. */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;
	if (maxsegsz == 0)
		return (EINVAL);
	/* Filter functionality is not supported. */
	if (filter != NULL || filterarg != NULL)
		return (EINVAL);

	*dmat = NULL;

	newtag = malloc(sizeof(*newtag), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Extend each exclusion threshold to the last byte of its page. */
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = _busdma_dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Inherit and tighten the restrictions of the parent tag, if any. */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		newtag->iommu = parent->iommu;
		newtag->iommu_cookie = parent->iommu_cookie;
	}

	/* Without an IOMMU, restricted or aligned tags may need to bounce. */
	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;
	if (newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool. */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation. */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
		/*
		 * Fix: clear the pointer so the trace below cannot read the
		 * freed tag's flags.
		 */
		newtag = NULL;
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
/*
 * Copy an existing tag's constraint and locking fields into a template so
 * callers can derive similar tags from it.  A NULL template or tag makes
 * this a no-op.
 */
void
bus_dma_template_clone(bus_dma_template_t *t, bus_dma_tag_t dmat)
{

	if (t != NULL && dmat != NULL) {
		t->alignment = dmat->alignment;
		t->boundary = dmat->boundary;
		t->lowaddr = dmat->lowaddr;
		t->highaddr = dmat->highaddr;
		t->maxsize = dmat->maxsize;
		t->nsegments = dmat->nsegments;
		t->maxsegsize = dmat->maxsegsz;
		t->flags = dmat->flags;
		t->lockfunc = dmat->lockfunc;
		t->lockfuncarg = dmat->lockfuncarg;
	}
}
/*
 * NUMA domain affinity is not implemented by this busdma back end; accept
 * the request and report success so domain-aware callers keep working.
 */
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

	return (0);
}
/*
 * Release a DMA tag.  Destroying a NULL tag is a harmless no-op; a tag
 * that still has maps outstanding is left intact and EBUSY is returned.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	int error;

	error = 0;
	if (dmat != NULL) {
		if (dmat->map_count == 0)
			free(dmat, M_DEVBUF);
		else
			error = EBUSY;
	}
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat, error);
	return (error);
}
/*
 * Allocate a map for use with this tag, growing the tag's bounce-page
 * pool when the tag may need to bounce.  Returns 0 on success; on hard
 * failure *mapp is freed and set to NULL.
 *
 * Fixes: the map was previously leaked when alloc_bounce_zone() or the
 * segment-array allocation failed, and the segment-array failure also
 * left the bounce zone's map_count over-counted.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	*mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (*mapp == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		return (ENOMEM);
	}

	STAILQ_INIT(&((*mapp)->bpages));

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				/* Fix: don't leak the freshly created map. */
				free(*mapp, M_DEVBUF);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	(*mapp)->nsegs = 0;
	(*mapp)->segments = malloc(sizeof(bus_dma_segment_t) *
	    dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if ((*mapp)->segments == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, ENOMEM);
		/* Fix: undo the map_count bump and free the map. */
		if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    dmat->bounce_zone != NULL)
			dmat->bounce_zone->map_count--;
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}

	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}
/*
 * Destroy a map created by bus_dmamap_create().  A map that still holds
 * bounce pages is busy (it is still loaded) and cannot be destroyed.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		if (!STAILQ_EMPTY(&map->bpages)) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone != NULL)
			dmat->bounce_zone->map_count--;
	}
	free(map->segments, M_DEVBUF);
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
/*
 * Allocate a DMA-able buffer and a map for it.  Small, naturally aligned,
 * cacheable requests with no physical restriction are served by malloc();
 * everything else uses kmem_alloc_contig().
 *
 * Fixes: the return value of bus_dmamap_create() was ignored, so a failed
 * map allocation led to a NULL *mapp dereference below; the map was also
 * leaked when the buffer allocation itself failed.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int error, mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* Fix: propagate map-creation failure instead of ignoring it. */
	error = bus_dmamap_create(dmat, flags, mapp);
	if (error != 0) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, error);
		return (error);
	}

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment <= dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this
		 * facility and handles multi-seg allocations.  Nobody
		 * is doing multi-seg allocations yet though.
		 */
		*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
		    dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary, attr);
		(*mapp)->contigalloc = 1;
	}
	if (*vaddr == NULL) {
		/* Fix: don't leak the map on allocation failure. */
		bus_dmamap_destroy(dmat, *mapp);
		*mapp = NULL;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->alignment)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
/*
 * Free a buffer obtained from bus_dmamem_alloc(), releasing it back to
 * whichever allocator produced it, then destroy the associated map.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{

	if (map->contigalloc)
		kmem_free(vaddr, dmat->maxsize);
	else
		free(vaddr, M_DEVBUF);
	bus_dmamap_destroy(dmat, map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
/*
 * Count the bounce pages required to load the physically contiguous range
 * [buf, buf + buflen).  Runs only on the first load of a map (when
 * pagesneeded is still zero); chunks that must bounce are counted one
 * page at a time, matching the load path's chunking.
 */
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t addr;
	bus_size_t chunk;

	if (map->pagesneeded != 0)
		return;

	CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
	    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
	    dmat->boundary, dmat->alignment);
	CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);

	for (addr = buf; buflen != 0; addr += chunk, buflen -= chunk) {
		chunk = buflen;
		if (must_bounce(dmat, addr)) {
			/* Bounced chunks are limited to one page. */
			chunk = MIN(chunk,
			    PAGE_SIZE - (addr & PAGE_MASK));
			map->pagesneeded++;
		}
	}
	CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
/*
 * Count the bounce pages needed to load the virtual range [buf, buf+buflen)
 * from the given pmap.  Only runs on the first load of a map (pagesneeded
 * == 0); the count is later consumed by _bus_dmamap_reserve_pages().
 */
static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
void *buf, bus_size_t buflen, int flags)
{
vm_offset_t vaddr;
vm_offset_t vendaddr;
bus_addr_t paddr;
if (map->pagesneeded == 0) {
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
dmat->boundary, dmat->alignment);
CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
vaddr = (vm_offset_t)buf;
vendaddr = (vm_offset_t)buf + buflen;
while (vaddr < vendaddr) {
bus_size_t sg_len;
/* Walk the buffer at most one page at a time. */
sg_len = MIN(vendaddr - vaddr,
PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
/* Resolve the physical address through the owning pmap. */
if (pmap == kernel_pmap)
paddr = pmap_kextract(vaddr);
else
paddr = pmap_extract(pmap, vaddr);
if (must_bounce(dmat, paddr)) {
/*
 * Mirror the load path, which rounds bounced chunks
 * up to the tag's alignment, so the page count here
 * matches what the load will actually consume.
 */
sg_len = roundup2(sg_len, dmat->alignment);
map->pagesneeded++;
}
vaddr += sg_len;
}
CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
}
}
/*
 * Load a physically contiguous buffer [buf, buf + buflen) into the map,
 * routing any chunk rejected by must_bounce() through a bounce page.
 * Returns 0 on success, EFBIG when the buffer does not fit in the
 * remaining segments, or a reservation error from the bounce code.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_addr_t curaddr;
bus_size_t sgsize;
int error;
if (segs == NULL)
segs = map->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
if (map->pagesneeded != 0) {
/*
 * Reserve the counted bounce pages; the shared bounce code
 * (subr_busdma_bounce.c) reports failure here — presumably
 * including a deferred-load case — and we bail out.
 */
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
while (buflen > 0) {
curaddr = buf;
sgsize = buflen;
if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
/*
 * Bounce at most one page at a time.  The kvaddr
 * argument is 0: this buffer is addressed physically.
 */
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
}
/* Append (or coalesce) the chunk; stop when segments run out. */
if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
segp))
break;
buf += sgsize;
buflen -= sgsize;
}
return (buflen != 0 ? EFBIG : 0);
}
/*
 * Load an array of vm_page_t.  No machine-specific handling is needed
 * here, so defer entirely to the generic page-at-a-time loader.
 */
int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	int error;

	error = bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs,
	    flags, segs, segp);
	return (error);
}
/*
 * Load a virtually addressed buffer [buf, buf + buflen) from the given
 * pmap into the map, bouncing chunks that must_bounce() rejects.
 * Returns 0 on success, EFBIG when the buffer does not fit in the
 * remaining segments, or a reservation error from the bounce code.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_dmamap_t map,
void *buf, bus_size_t buflen,
pmap_t pmap,
int flags,
bus_dma_segment_t *segs,
int *segp)
{
bus_size_t sgsize;
bus_addr_t curaddr;
vm_offset_t kvaddr, vaddr;
int error;
if (segs == NULL)
segs = map->segments;
if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
if (map->pagesneeded != 0) {
/* Reserve the counted bounce pages before mapping anything. */
error = _bus_dmamap_reserve_pages(dmat, map, flags);
if (error)
return (error);
}
}
vaddr = (vm_offset_t)buf;
while (buflen > 0) {
/*
 * Get the physical address for this chunk; kvaddr records the
 * kernel virtual address when one exists (0 for user mappings),
 * so the bounce code can copy without a temporary mapping.
 */
if (pmap == kernel_pmap) {
curaddr = pmap_kextract(vaddr);
kvaddr = vaddr;
} else {
curaddr = pmap_extract(pmap, vaddr);
kvaddr = 0;
}
/* At most one page per iteration, bounded by the page end. */
sgsize = MIN(buflen, PAGE_SIZE - (curaddr & PAGE_MASK));
if (map->pagesneeded != 0 && must_bounce(dmat, curaddr)) {
/*
 * Round up to the tag's alignment (matching the count
 * pass), but never past the end of the buffer.
 */
sgsize = roundup2(sgsize, dmat->alignment);
sgsize = MIN(sgsize, buflen);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
sgsize);
}
/* Append (or coalesce) the chunk; stop when segments run out. */
if (!_bus_dmamap_addsegs(dmat, map, curaddr, sgsize, segs,
segp))
break;
vaddr += sgsize;
buflen -= MIN(sgsize, buflen);
}
return (buflen != 0 ? EFBIG : 0);
}
/*
 * Record the state needed to retry a load later when bounce pages become
 * available.  Only tags that can bounce ever defer, so non-bouncing tags
 * store nothing.
 */
void
_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback,
    void *callback_arg)
{

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) == 0)
		return;
	map->dmat = dmat;
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}
/*
 * Finish a load: cache the segment list in the map and, when an IOMMU is
 * attached, let it rewrite the segments (addresses and possibly count)
 * to bus addresses.  Returns the segment array the caller should use.
 */
bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dma_segment_t *segs, int nsegs, int error)
{
map->nsegs = nsegs;
/* Copy the caller's segments into the map before the IOMMU pass. */
if (segs != NULL)
memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
if (dmat->iommu != NULL)
/* May rewrite map->segments in place and update map->nsegs. */
IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs,
dmat->lowaddr, dmat->highaddr, dmat->alignment,
dmat->boundary, dmat->iommu_cookie);
/* Reflect any IOMMU rewrite back into the caller's array. */
if (segs != NULL)
memcpy(segs, map->segments, map->nsegs*sizeof(segs[0]));
else
segs = map->segments;
return (segs);
}
/*
 * Tear down a mapping: undo any IOMMU translations for the cached
 * segments, then return the map's bounce pages to the pool.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (dmat->iommu != NULL) {
		IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs,
		    dmat->iommu_cookie);
		map->nsegs = 0;
	}
	free_bounce_pages(dmat, map);
}
/*
 * Synchronize bounce pages with the real data buffer.  PREWRITE copies
 * the data out to the bounce pages before the device reads them;
 * POSTREAD copies device-written bounce pages back.  Always ends with a
 * powerpc_sync() barrier.
 *
 * Fix: the POSTREAD loop reused the bpage cursor, which is NULL after
 * the PREWRITE loop finishes, so a combined PREWRITE|POSTREAD op
 * silently skipped the copy-back.  The POSTREAD pass now restarts from
 * the head of the list.
 */
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					/*
					 * No KVA for the data page; borrow a
					 * temporary quick mapping.
					 */
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)datavaddr,
				    (void *)bpage->vaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			/* Fix: restart from the head of the list. */
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				tempvaddr = 0;
				datavaddr = bpage->datavaddr;
				if (datavaddr == 0) {
					/* Borrow a temporary quick mapping. */
					tempvaddr = pmap_quick_enter_page(
					    bpage->datapage);
					datavaddr = tempvaddr |
					    bpage->dataoffs;
				}

				bcopy((void *)bpage->vaddr,
				    (void *)datavaddr, bpage->datacount);

				if (tempvaddr != 0)
					pmap_quick_remove_page(tempvaddr);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}

	/* Order DMA-visible memory accesses on PowerPC. */
	powerpc_sync();
}
/*
 * Attach an IOMMU device and its per-tag cookie to a tag.  Subsequent
 * loads on this tag will have their segments translated via IOMMU_MAP()
 * and released via IOMMU_UNMAP().
 */
int
bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie)
{

	tag->iommu = iommu;
	tag->iommu_cookie = cookie;
	return (0);
}