#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
if (size == SPAGE_SIZE) {
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
size = SPAGE_SIZE;
size = SPAGE_SIZE;
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
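/*
 * A minimal, standalone sketch of the arithmetic behind the four macros
 * above. The concrete values are assumptions, not taken from this listing:
 * SECT_SIZE = 1 MiB, LPAGE_SIZE = 64 KiB, SPAGE_SIZE = 4 KiB. It only
 * illustrates the offset/mask/count relationships, not any driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define SECT_SIZE  (1u << 20)	/* 1 MiB section */
#define LPAGE_SIZE (1u << 16)	/* 64 KiB large page */
#define SPAGE_SIZE (1u << 12)	/* 4 KiB small page */

#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))	/* offset within a small page */
#define SPAGE_MASK       (~(SPAGE_SIZE - 1))		/* page-aligned part of an address */
#define NUM_LV2ENTRIES   (SECT_SIZE / SPAGE_SIZE)	/* 256 second-level entries per section */
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)	/* 16 small pages per large page */

int main(void)
{
	unsigned int iova = 0x12345678u;

	printf("offset in page: 0x%x\n", spage_offs(iova));	/* 0x678 */
	printf("page base:      0x%x\n", iova & SPAGE_MASK);	/* 0x12345000 */
	printf("lv2 entries:    %u\n", NUM_LV2ENTRIES);		/* 256 */
	printf("spages/lpage:   %u\n", SPAGES_PER_LPAGE);	/* 16 */
	return 0;
}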
iova += SPAGE_SIZE;
writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);
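/*
 * Sketch of the per-page flush loop that the three fragments above belong
 * to, assuming a flush-by-address interface: one flush per 4 KiB small
 * page, capped at 64 flushes per call as in the min_t() clamp. The MMIO
 * write is stubbed out and all names here are illustrative, not the
 * driver's.
 */
#include <stddef.h>
#include <stdint.h>

#define SPAGE_SIZE 4096u
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

static void flush_one_entry(uint32_t iova)
{
	(void)iova;	/* stands in for the writel() to the flush register */
}

static void tlb_invalidate_entry(uint32_t iova, size_t size)
{
	size_t num_pages = size / SPAGE_SIZE;
	unsigned int num_inv = num_pages > 64 ? 64 : (unsigned int)num_pages;
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		flush_one_entry(iova & SPAGE_MASK);
		iova += SPAGE_SIZE;	/* advance one small page per flush */
	}
}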
domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE;
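/*
 * Sketch of how a page-size bitmap like the one above is typically
 * consumed: pick the largest size that is set in the bitmap, fits in the
 * remaining length, and matches the alignment of iova and paddr. This only
 * mirrors the idea behind the core's page-size selection; it is not the
 * actual iommu core code, and the sizes are assumed values.
 */
#include <stddef.h>
#include <stdint.h>

#define SPAGE_SIZE 0x1000u	/* 4 KiB  */
#define LPAGE_SIZE 0x10000u	/* 64 KiB */
#define SECT_SIZE  0x100000u	/* 1 MiB  */

static size_t pick_pgsize(unsigned long bitmap, uint64_t iova,
			  uint64_t paddr, size_t len)
{
	const size_t sizes[] = { SECT_SIZE, LPAGE_SIZE, SPAGE_SIZE };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t sz = sizes[i];

		if (!(bitmap & sz))
			continue;	/* size not advertised in the bitmap */
		if (len < sz)
			continue;	/* remaining chunk is too small */
		if ((iova | paddr) & (sz - 1))
			continue;	/* not aligned for this size */
		return sz;
	}
	return 0;	/* caller is expected to pass 4 KiB aligned requests */
}

/*
 * With bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, a 1 MiB aligned,
 * 1 MiB long request is covered by one section entry, while an unaligned
 * tail falls back to 64 KiB or 4 KiB pages.
 */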
SPAGE_SIZE);
SPAGE_SIZE, DMA_TO_DEVICE);
SPAGE_SIZE, DMA_TO_DEVICE);
SPAGE_SIZE, DMA_TO_DEVICE);
for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
if (size > SPAGE_SIZE)
rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, SPAGE_SIZE);
SPAGE_SIZE);
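/*
 * Sketch of the two zap patterns in the rk_* fragments above: either every
 * small page in a range is flushed one line at a time, or, after a new
 * mapping, only the first and last page of the range are flushed. The
 * zap_one_page() stub stands in for the per-IOVA flush that
 * rk_iommu_zap_iova() performs with a SPAGE_SIZE length; names here are
 * illustrative.
 */
#include <stddef.h>
#include <stdint.h>

#define SPAGE_SIZE 4096u

static void zap_one_page(uint64_t iova)
{
	(void)iova;	/* stands in for the "zap one TLB line" MMIO write */
}

/* Flush every page of [iova_start, iova_start + size). */
static void zap_lines(uint64_t iova_start, size_t size)
{
	uint64_t iova_end = iova_start + size;

	for (uint64_t iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
		zap_one_page(iova);
}

/* Flush only the boundary pages of a just-mapped range. */
static void zap_first_last(uint64_t iova, size_t size)
{
	zap_one_page(iova);
	if (size > SPAGE_SIZE)
		zap_one_page(iova + size - SPAGE_SIZE);
}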
pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
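/*
 * Sketch of the sync arithmetic that goes with DMA-mapping a one-page
 * (SPAGE_SIZE) second-level table as above: the table is mapped once, and
 * after the CPU writes a run of 32-bit entries, only that run needs to be
 * synced back to the device. pt_dma, pte_index and count are illustrative
 * names, not taken from the listing.
 */
#include <stddef.h>
#include <stdint.h>

#define SPAGE_SIZE 4096u	/* one second-level table occupies one small page */

struct sync_range {
	uint64_t dma_addr;	/* start of the region to sync to the device */
	size_t   len;		/* length of that region in bytes */
};

/* Byte range of the table covering entries [pte_index, pte_index + count). */
static struct sync_range pte_sync_range(uint64_t pt_dma, unsigned int pte_index,
					unsigned int count)
{
	struct sync_range r = {
		.dma_addr = pt_dma + (uint64_t)pte_index * sizeof(uint32_t),
		.len      = (size_t)count * sizeof(uint32_t),
	};
	return r;
}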
unsigned int pte_total = size / SPAGE_SIZE;
return pte_count * SPAGE_SIZE;
unsigned int pte_total = size / SPAGE_SIZE;
paddr += SPAGE_SIZE;
pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
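/*
 * Sketch of the PTE fill loop the pte_total / pte_count fragments above
 * come from: the mapping size becomes a number of small-page entries,
 * paddr advances one page per entry, and on a conflict the entries already
 * written are unwound (pte_count * SPAGE_SIZE bytes, as in the fragments).
 * make_pte(), PTE_VALID and the flat table array are illustrative
 * stand-ins, not the driver's helpers or its real entry layout.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SPAGE_SIZE 4096u
#define PTE_VALID  0x1u

static uint32_t make_pte(uint64_t paddr)
{
	/* Page-aligned physical address plus a validity bit (illustrative). */
	return (uint32_t)(paddr & ~(uint64_t)(SPAGE_SIZE - 1)) | PTE_VALID;
}

/* Returns 0 on success, -1 if an entry was already valid (conflict). */
static int map_ptes(uint32_t *pte_addr, uint64_t paddr, size_t size)
{
	unsigned int pte_total = size / SPAGE_SIZE;
	unsigned int pte_count;

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		if (pte_addr[pte_count] & PTE_VALID) {
			/*
			 * Unwind the pte_count entries already written; the
			 * driver also advances iova by pte_count * SPAGE_SIZE
			 * when reporting the conflicting address.
			 */
			memset(pte_addr, 0, pte_count * sizeof(*pte_addr));
			return -1;
		}
		pte_addr[pte_count] = make_pte(paddr);
		paddr += SPAGE_SIZE;	/* next small page of the region */
	}
	return 0;
}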
sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
if (size > SPAGE_SIZE) {
sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
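/*
 * Sketch of the range-zap pattern in the sun50i_* fragments above: instead
 * of flushing every page, the entries around both ends of the range are
 * dropped (the boundary page plus the one after it), which the driver
 * apparently treats as sufficient for this hardware. zap_page() is an
 * illustrative stand-in for the per-IOVA flush register write.
 */
#include <stddef.h>
#include <stdint.h>

#define SPAGE_SIZE 4096u

static void zap_page(uint64_t iova)
{
	(void)iova;	/* stands in for the MMIO flush of one TLB entry */
}

static void zap_range(uint64_t iova, size_t size)
{
	/* Start of the range: the first page and the one following it. */
	zap_page(iova);
	zap_page(iova + SPAGE_SIZE);

	/* End of the range, when it spans more than one small page. */
	if (size > SPAGE_SIZE) {
		zap_page(iova + size);
		zap_page(iova + size + SPAGE_SIZE);
	}
}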