s390_domain
struct iommu_domain *s390_domain; /* attached IOMMU domain */
rto = get_rto_from_iova(s390_domain, iova);
struct s390_domain *s390_domain = to_s390_domain(domain);
if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
(iova + size - 1) > s390_domain->domain.geometry.aperture_end))
rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);
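The fragments above (aperture check, s390_iommu_invalidate_trans(), unmapped_pages counter) belong to the unmap path. A minimal sketch of how they might assemble into an unmap_pages callback, assuming the generic struct iommu_ops signature; the size computation, the early return on failure, and the iommu_iotlb_gather_add_range() call are assumptions, not taken from this listing:

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova, size_t pgsize,
				     size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);	/* assumes pgsize is a power of two */
	int rc;

	/* Refuse IOVAs outside the domain's fixed aperture. */
	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	/* Invalidate the CPU-side translation entries for the range. */
	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	/* Defer the hardware TLB flush to iotlb_sync via the gather. */
	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}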
struct s390_domain *s390_domain;
if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
s390_domain = to_s390_domain(zdev->s390_domain);
return &s390_domain->ctrs;
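Taken together, these lines look like a small accessor exposing the per-domain counters for a zPCI device. A sketch, assuming the counters live in a struct zpci_iommu_ctrs and that blocked/identity domains keep no counters (the NULL return and the function name are assumptions):

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	/* Blocked and identity domains have no paging structures and thus
	 * no per-domain counters to report (assumption). */
	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}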
static void dma_cleanup_tables(struct s390_domain *domain)
static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
return container_of(dom, struct s390_domain, domain);
static inline u64 max_tbl_size(struct s390_domain *domain)
struct s390_domain *s390_domain;
s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
if (!s390_domain)
s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
if (!s390_domain->dma_table) {
kfree(s390_domain);
s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
s390_domain->domain.pgsize_bitmap = SZ_4K;
s390_domain->domain.geometry.force_aperture = true;
s390_domain->domain.geometry.aperture_start = 0;
s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
spin_lock_init(&s390_domain->list_lock);
INIT_LIST_HEAD_RCU(&s390_domain->devices);
return &s390_domain->domain;
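The allocation lines above plausibly come together as the paging-domain constructor. A sketch, assuming an s390_domain_alloc_paging()-style entry point; the selection between ZPCI_TABLE_TYPE_RTX/RSX/RFX depends on the requested aperture and the device's supported table types and is only hinted at here:

static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct s390_domain *s390_domain;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	/* Root of the CPU-side I/O translation tables. */
	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	/* Pick the smallest region-table type that covers the required
	 * aperture (RTX < RSX < RFX); the decision logic is elided here. */
	s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;

	s390_domain->domain.pgsize_bitmap = SZ_4K;
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}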
struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);
dma_cleanup_tables(s390_domain);
kfree(s390_domain);
struct s390_domain *s390_domain = to_s390_domain(domain);
WARN_ON(!list_empty(&s390_domain->devices));
call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
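Freeing goes through RCU so that a concurrent lockless walker never sees the translation tables disappear under it. A sketch of the two halves, assuming an s390_domain_free()-style release callback:

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	/* Runs after a grace period: no reader can still walk the tables. */
	dma_cleanup_tables(s390_domain);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	/* Every device must have been detached before the domain dies. */
	WARN_ON(!list_empty(&s390_domain->devices));
	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}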
zdev->s390_domain = domain;
static u64 get_iota_region_flag(struct s390_domain *domain)
struct s390_domain *s390_domain;
s390_domain = to_s390_domain(domain);
iota = virt_to_phys(s390_domain->dma_table) |
get_iota_region_flag(s390_domain);
rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);
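get_iota_region_flag() and the dma_table address combine into the I/O translation anchor (IOTA) that is registered with the device. A sketch of the registration helper; the zpci_register_ioat() argument list and the identity/blocked handling are assumptions, and the identity case is elided:

static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
				      struct iommu_domain *domain, u8 *status)
{
	struct s390_domain *s390_domain;
	u64 iota;

	switch (domain->type) {
	case IOMMU_DOMAIN_BLOCKED:
		/* Nothing to register: DMA stays disabled. */
		return 0;
	case IOMMU_DOMAIN_IDENTITY:
		/* Register the untranslated range; elided in this sketch. */
		return 0;
	default:
		s390_domain = to_s390_domain(domain);
		/* IOTA = physical table origin plus its region-type bits. */
		iota = virt_to_phys(s390_domain->dma_table) |
		       get_iota_region_flag(s390_domain);
		return zpci_register_ioat(zdev, 0, zdev->start_dma,
					  zdev->end_dma, iota, status);
	}
}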
struct s390_domain *s390_domain;
if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
s390_domain = to_s390_domain(zdev->s390_domain);
spin_lock_irqsave(&s390_domain->list_lock, flags);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
struct s390_domain *s390_domain = to_s390_domain(domain);
zdev->dma_table = s390_domain->dma_table;
spin_lock_irqsave(&s390_domain->list_lock, flags);
list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
spin_unlock_irqrestore(&s390_domain->list_lock, flags);
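Detach and attach both manipulate the domain's RCU-protected device list under list_lock. A sketch of the attach side, assuming the standard attach_dev signature; IOTA registration with the device and its error handling are elided:

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;

	/* s390_iommu_domain_reg_ioat() and its error handling would go
	 * here; elided in this sketch. */

	/* Point the device at the domain's translation tables ... */
	zdev->dma_table = s390_domain->dma_table;
	zdev->s390_domain = domain;

	/* ... and publish it on the domain's device list so IOTLB flushes
	 * reach it. */
	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}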
if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
struct s390_domain *s390_domain = to_s390_domain(domain);
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
atomic64_inc(&s390_domain->ctrs.global_rpcits);
struct s390_domain *s390_domain = to_s390_domain(domain);
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
atomic64_inc(&s390_domain->ctrs.sync_rpcits);
struct s390_domain *s390_domain = to_s390_domain(domain);
list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
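The three counter increments above (global_rpcits, sync_rpcits, sync_map_rpcits) suggest three IOTLB callbacks that each iterate the domain's device list under RCU and issue a translation refresh per device. A sketch of the flush-all variant; the actual RPCIT call and its arguments are left out:

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		/* Refresh the device's whole IOVA range here (RPCIT);
		 * the hardware call itself is omitted from this sketch. */
	}
	rcu_read_unlock();
}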
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
struct s390_domain *s390_domain = to_s390_domain(domain);
if (iova < s390_domain->domain.geometry.aperture_start ||
(iova + size - 1) > s390_domain->domain.geometry.aperture_end)
rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
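The map path mirrors unmap: bounds-check against the aperture, install translation entries with s390_iommu_validate_trans(), then account the pages. A sketch assuming the generic map_pages signature; the page-size check, protection-flag handling, and the validate_trans argument order are assumptions:

static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount, int prot,
				gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	/* Walk/extend the translation tables and fill in the PTEs; write
	 * protection derived from @prot is assumed to be handled inside
	 * validate_trans in this sketch. */
	rc = s390_iommu_validate_trans(s390_domain, paddr, iova, pgcount, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}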
static unsigned long *get_rso_from_iova(struct s390_domain *domain,
static unsigned long *get_rto_from_iova(struct s390_domain *domain,
struct s390_domain *s390_domain = to_s390_domain(domain);