IOMMU_PAGE_SIZE
*((ulong_t *)result) = IOMMU_PAGE_SIZE;
IOMMU_PAGE_SIZE, sleep);
MAX(mp->dmai_attr.dma_attr_align, IOMMU_PAGE_SIZE),
pg_offset = IOMMU_PAGE_SIZE - pg_offset;
if (dev_attr_p->dma_attr_align > IOMMU_PAGE_SIZE)
vmem_free(map_p, base_addr, IOMMU_PAGE_SIZE);
if (align && (align > IOMMU_PAGE_SIZE))
align = MAX(align, IOMMU_PAGE_SIZE) - 1;
IOMMU_PAGE_SIZE - offset, flags, mp, &pfn0,
IOMMU_PAGE_SIZE) & IOMMU_PAGE_MASK);
for (vaddr = sva, i = 1; i < npages; i++, vaddr += IOMMU_PAGE_SIZE) {
IOMMU_PAGE_SIZE, flags, mp, &pfn,
for (; len; len--, dvma_addr += IOMMU_PAGE_SIZE) {
uint32_t redzone_sz = HAS_REDZONE(mp) ? IOMMU_PAGE_SIZE : 0;
mp->dmai_winsize = IOMMU_PAGE_SIZE;
mp->dmai_winsize = P2ROUNDUP(xfer_sz + pg_off, IOMMU_PAGE_SIZE);
IOMMU_PTOB(npages), IOMMU_PAGE_SIZE, 0,
mp->dmai_size = npages * IOMMU_PAGE_SIZE;
for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) {
IOMMU_PAGE_SIZE, flags, mp, &pfn,
- (tsb_entries * IOMMU_PAGE_SIZE);
IOMMU_PTOB(tsb_entries) - cache_size, IOMMU_PAGE_SIZE,
NULL, NULL, NULL, IOMMU_PAGE_SIZE, VM_SLEEP);
(void) vmem_xalloc(iommu_p->iommu_dvma_map, IOMMU_PAGE_SIZE,
IOMMU_PAGE_SIZE, 0, 0, va, va + IOMMU_PAGE_SIZE,
for (i = 0; i < npages; i++, kvaddr += IOMMU_PAGE_SIZE) {
i++, kva += IOMMU_PAGE_SIZE)
hat_delete_callback(kva, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK,
hat_delete_callback(vaddr, IOMMU_PAGE_SIZE - offset, mp, HAC_PAGELOCK,
vaddr = (caddr_t)(((uintptr_t)vaddr + IOMMU_PAGE_SIZE) &
hat_delete_callback(vaddr, IOMMU_PAGE_SIZE, mp, HAC_PAGELOCK,
vaddr += IOMMU_PAGE_SIZE;
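The loops above all have the same shape: advance a virtual address one IOMMU page per iteration while mapping or tearing down that page. Below is a minimal, self-contained sketch of that walk, assuming the 8K IOMMU page size (IOMMU_PAGE_SHIFT of 13) used on sun4u; map_one_page() and walk_pages() are hypothetical names standing in for the real per-page work (TTE programming, hat_delete_callback(), and so on).

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define IOMMU_PAGE_SHIFT	13	/* assumed: 8K IOMMU pages, as on sun4u */
#define IOMMU_PAGE_SIZE		(1 << IOMMU_PAGE_SHIFT)

/* Hypothetical stand-in for the real per-page work. */
static void
map_one_page(uintptr_t va)
{
	printf("map page at 0x%lx\n", (unsigned long)va);
}

/*
 * Walk npages IOMMU pages starting at vaddr, one page per iteration,
 * the same shape as the for-loops in the listing above.
 */
static void
walk_pages(uintptr_t vaddr, size_t npages)
{
	size_t i;

	for (i = 0; i < npages; i++, vaddr += IOMMU_PAGE_SIZE)
		map_one_page(vaddr);
}

int
main(void)
{
	walk_pages(0x2000, 3);	/* pages at 0x2000, 0x4000, 0x6000 */
	return (0);
}

The masks used throughout the fragments come from two companion macros: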
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))	/* clears the intra-page offset bits */
#define IOMMU_PAGE_OFFSET (IOMMU_PAGE_SIZE - 1)		/* isolates the intra-page offset bits */
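Putting the two definitions to work: a minimal sketch of the page arithmetic they enable, again assuming IOMMU_PAGE_SHIFT of 13; dvma_pagecount() is a hypothetical helper named here only for illustration, mirroring the "& IOMMU_PAGE_MASK" and "IOMMU_PAGE_SIZE - pg_offset" expressions in the listing above.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define IOMMU_PAGE_SHIFT	13	/* assumed: 8K IOMMU pages */
#define IOMMU_PAGE_SIZE		(1 << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_PAGE_OFFSET	(IOMMU_PAGE_SIZE - 1)

/*
 * Hypothetical helper: how many IOMMU pages does a buffer starting at
 * vaddr and running for len bytes span? Adding IOMMU_PAGE_OFFSET
 * before shifting rounds the byte count up to whole pages.
 */
static size_t
dvma_pagecount(uintptr_t vaddr, size_t len)
{
	size_t off = vaddr & IOMMU_PAGE_OFFSET;	/* offset into first page */

	return ((off + len + IOMMU_PAGE_OFFSET) >> IOMMU_PAGE_SHIFT);
}

int
main(void)
{
	uintptr_t va = 0x2f00;	/* deliberately unaligned */

	/* Align down to a page boundary, as in "& IOMMU_PAGE_MASK". */
	printf("page base: 0x%lx\n", (unsigned long)(va & IOMMU_PAGE_MASK));

	/* Bytes left in the first page, as in "IOMMU_PAGE_SIZE - pg_offset". */
	printf("first-page bytes: 0x%lx\n",
	    (unsigned long)(IOMMU_PAGE_SIZE - (va & IOMMU_PAGE_OFFSET)));

	printf("pages spanned: %zu\n", dvma_pagecount(va, 0x5000));
	return (0);
}

Because the page size is a power of two, aligning and rounding reduce to single AND and shift operations, which is why the driver masks with IOMMU_PAGE_OFFSET and IOMMU_PAGE_MASK rather than using division or modulo.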