MMU_PAGE_SIZE
*((ulong_t *)result) = MMU_PAGE_SIZE;
pg_offset = MMU_PAGE_SIZE - pg_offset;
if (dev_attr_p->dma_attr_align > MMU_PAGE_SIZE)
if (align && (align > MMU_PAGE_SIZE))
align = MAX(align, MMU_PAGE_SIZE) - 1;
vaddr = mp->dmai_object.dmao_obj.virt_obj.v_addr + MMU_PAGE_SIZE;
for (i = 1; i < npages; i++, vaddr += MMU_PAGE_SIZE) {
uint32_t redzone_sz = PX_HAS_REDZONE(mp) ? MMU_PAGE_SIZE : 0;
mp->dmai_winsize = MMU_PAGE_SIZE;
mp->dmai_winsize = P2ROUNDUP(xfer_sz + pg_off, MMU_PAGE_SIZE);
MMU_PAGE_SIZE, sleep);
MAX(mp->dmai_attr.dma_attr_align, MMU_PAGE_SIZE),
MMU_PAGE_SIZE);
MMU_PAGE_SIZE);
MMU_PTOB(npages), MMU_PAGE_SIZE, 0,
mp->dmai_size = npages * MMU_PAGE_SIZE;
/*
 * Mask that clears the in-page offset bits, yielding the page-aligned base
 * of an address (addr & MMU_PAGE_MASK). Assumes MMU_PAGE_SIZE is a power
 * of two — TODO confirm against the MMU_PAGE_SIZE definition.
 * Fully parenthesized so the expansion is safe in any expression context.
 */
#define MMU_PAGE_MASK (~(MMU_PAGE_SIZE - 1))
/*
 * Mask of the in-page offset bits (addr & MMU_PAGE_OFFSET gives the byte
 * offset within a page). Assumes MMU_PAGE_SIZE is a power of two — TODO
 * confirm against the MMU_PAGE_SIZE definition.
 */
#define MMU_PAGE_OFFSET (MMU_PAGE_SIZE - 1)
MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);
(void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
vmem_xfree(dvma_map, va, MMU_PAGE_SIZE);
ttes2map = (MMU_PAGE_SIZE - P2PHASE(ra, MMU_PAGE_SIZE)) >> 3;
hat_getpfnum(kas.a_hat, ((caddr_t)addr + (MMU_PAGE_SIZE * i))))