IMMU_PAGESIZE
if (MMU_PAGESIZE != IMMU_PAGESIZE) {
MMU_PAGESIZE, IMMU_PAGESIZE);
mrng.mrng_npages = IMMU_ROUNDUP(mp->ml_size) / IMMU_PAGESIZE;
IMMU_PAGESIZE;
npages = mp->ml_size/IMMU_PAGESIZE + 1;
IMMU_PAGESIZE, /* quantum */
immu_regs_cpu_flush(immu, context->hwpg_vaddr, IMMU_PAGESIZE);
ASSERT(paddr % IMMU_PAGESIZE == 0);
dvma += IMMU_PAGESIZE;
ASSERT(paddr % IMMU_PAGESIZE == 0);
(dcookies[j].dck_npages - nppages) * IMMU_PAGESIZE;
paddr += IMMU_PAGESIZE;
dvma += IMMU_PAGESIZE;
xsize = npages * IMMU_PAGESIZE;
align = MAX((size_t)(dma_attr->dma_attr_align), IMMU_PAGESIZE);
xsize = IMMU_NPREPTES * IMMU_PAGESIZE;
align = MAX((size_t)(dma_attr->dma_attr_align), IMMU_PAGESIZE);
IMMU_NPREPTES * IMMU_PAGESIZE);
uint64_t size = npages * IMMU_PAGESIZE;
npages = (IMMU_ROUNDUP(size) / IMMU_PAGESIZE) + 1;
next = kmem_zalloc(IMMU_PAGESIZE, kmflag);
kmem_free(next, IMMU_PAGESIZE);
if (ddi_dma_mem_alloc(pgtable->hwpg_dmahdl, IMMU_PAGESIZE,
kmem_free(next, IMMU_PAGESIZE);
if (actual_size < IMMU_PAGESIZE) {
kmem_free(next, IMMU_PAGESIZE);
kmem_free(pgtable->swpg_next_array, IMMU_PAGESIZE);
bzero(pgtable->hwpg_vaddr, IMMU_PAGESIZE);
bzero(pgtable->swpg_next_array, IMMU_PAGESIZE);
((d).dck_paddr && ((d).dck_paddr + (d).dck_npages * IMMU_PAGESIZE) \
start = IMMU_PAGESIZE;
npages = mp->ml_size/IMMU_PAGESIZE + 1;
npages = mp->ml_size/IMMU_PAGESIZE + 1;
dvma += (npages * IMMU_PAGESIZE);
/* Mask selecting the byte-offset bits within one IMMU page (IMMU_PAGESIZE - 1). */
#define IMMU_PAGEOFFSET (IMMU_PAGESIZE - 1)
/*
 * Highest valid index into a page-sized array of hw_pdte_t entries,
 * i.e. (entries per page) - 1.  NOTE(review): hw_pdte_t is presumably a
 * hardware page-directory/table entry -- confirm against its declaration.
 */
#define IMMU_PGTABLE_MAXIDX ((IMMU_PAGESIZE / sizeof (hw_pdte_t)) - 1)