#ifndef __X86_IOMMU_X86_IOMMU_H
#define __X86_IOMMU_X86_IOMMU_H
/*
 * A single 64-bit IOMMU page table entry.  The bit layout is defined by
 * the hardware-specific driver, not by this shared header.
 */
typedef struct iommu_pte {
uint64_t pte;
} iommu_pte_t;
/* Geometry of one IOMMU page table page; tracks the CPU base page size. */
#define IOMMU_PAGE_SIZE PAGE_SIZE
/* Mask of the byte-offset bits within an IOMMU page (low bits set). */
#define IOMMU_PAGE_MASK (IOMMU_PAGE_SIZE - 1)
#define IOMMU_PAGE_SHIFT PAGE_SHIFT
/* PTEs per page table page: PAGE_SIZE / 8 (512 for 4 KiB base pages). */
#define IOMMU_NPTEPG (IOMMU_PAGE_SIZE / sizeof(iommu_pte_t))
/* log2(IOMMU_NPTEPG); hardcoded -- must stay in sync with IOMMU_NPTEPG. */
#define IOMMU_NPTEPGSHIFT 9
/* Mask extracting a PTE index within one page table page. */
#define IOMMU_PTEMASK (IOMMU_NPTEPG - 1)
struct sf_buf;
struct vm_object;
/*
 * Page table page management: the pages backing the IOMMU page tables
 * are allocated from, and freed back to, a VM object, keyed by pindex.
 */
struct vm_page *iommu_pgalloc(struct vm_object *obj, vm_pindex_t idx,
int flags);
void iommu_pgfree(struct vm_object *obj, vm_pindex_t idx, int flags,
struct iommu_map_entry *entry);
/* Map/unmap a page table page into KVA through an sf_buf. */
void *iommu_map_pgtbl(struct vm_object *obj, vm_pindex_t idx, int flags,
struct sf_buf **sf);
void iommu_unmap_pgtbl(struct sf_buf *sf);
/* Presumably the highest host address usable for IOMMU allocations --
 * TODO confirm against the allocator that consumes it. */
extern iommu_haddr_t iommu_high;
/* Count of pages allocated for page tables (likely exported via sysctl). */
extern int iommu_tbl_pagecnt;
extern int iommu_qi_batch_coalesce;
/* Root of the hw.iommu sysctl tree. */
SYSCTL_DECL(_hw_iommu);
struct x86_unit_common;
struct x86_iommu {
struct x86_unit_common *(*get_x86_common)(struct
iommu_unit *iommu);
void (*unit_pre_instantiate_ctx)(struct iommu_unit *iommu);
void (*qi_ensure)(struct iommu_unit *unit, int descr_count);
void (*qi_emit_wait_descr)(struct iommu_unit *unit, uint32_t seq,
bool, bool, bool);
void (*qi_advance_tail)(struct iommu_unit *unit);
void (*qi_invalidate_emit)(struct iommu_domain *idomain,
iommu_gaddr_t base, iommu_gaddr_t size, struct iommu_qi_genseq *
pseq, bool emit_wait);
void (*domain_unload_entry)(struct iommu_map_entry *entry, bool free,
bool cansleep);
void (*domain_unload)(struct iommu_domain *iodom,
struct iommu_map_entries_tailq *entries, bool cansleep);
struct iommu_ctx *(*get_ctx)(struct iommu_unit *iommu,
device_t dev, uint16_t rid, bool id_mapped, bool rmrr_init);
void (*free_ctx_locked)(struct iommu_unit *iommu,
struct iommu_ctx *context);
struct iommu_unit *(*find)(device_t dev, bool verbose);
int (*alloc_msi_intr)(device_t src, u_int *cookies, u_int count);
int (*map_msi_intr)(device_t src, u_int cpu, u_int vector,
u_int cookie, uint64_t *addr, uint32_t *data);
int (*unmap_msi_intr)(device_t src, u_int cookie);
int (*map_ioapic_intr)(u_int ioapic_id, u_int cpu, u_int vector,
bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
uint32_t *lo);
int (*unmap_ioapic_intr)(u_int ioapic_id, u_int *cookie);
};
/* Register / look up the hardware-specific ops vector (struct x86_iommu). */
void set_x86_iommu(struct x86_iommu *);
struct x86_iommu *get_x86_iommu(void);
/*
 * State for one MSI interrupt used by an IOMMU unit; units keep an
 * array of these (see the intrs[] member of struct x86_unit_common).
 */
struct iommu_msi_data {
int irq;		/* assigned IRQ number */
int irq_rid;		/* resource id for the interrupt resource */
struct resource *irq_res;
void *intr_handle;	/* cookie from interrupt setup */
int (*handler)(void *);	/* interrupt handler, gets the unit as arg --
			   TODO confirm argument against drivers */
/* Presumably unit register offsets for programming the MSI address/data
 * pair -- verify against the hardware backends. */
int msi_data_reg;
int msi_addr_reg;
int msi_uaddr_reg;
uint64_t msi_addr;	/* programmed MSI address */
uint32_t msi_data;	/* programmed MSI data payload */
void (*enable_intr)(struct iommu_unit *);
void (*disable_intr)(struct iommu_unit *);
const char *name;	/* human-readable interrupt name */
};
/* Maximum number of MSI interrupts per IOMMU unit. */
#define IOMMU_MAX_MSI 3
/*
 * Hardware-independent per-unit state shared by the x86 IOMMU drivers,
 * mostly the queued-invalidation (QI) machinery.
 */
struct x86_unit_common {
uint32_t qi_buf_maxsz;	/* max invalidation buffer size */
uint32_t qi_cmd_sz;	/* size of one QI command/descriptor */
/* Invalidation queue: ring of commands handed to the hardware. */
char *inv_queue;
vm_size_t inv_queue_size;
uint32_t inv_queue_avail;	/* free space in the queue */
uint32_t inv_queue_tail;
/*
 * Location updated with completed wait sequence numbers; declared
 * volatile, presumably because the hardware writes it (its physical
 * address is kept alongside) -- confirm against the drivers.
 */
volatile uint64_t inv_waitd_seq_hw;
uint64_t inv_waitd_seq_hw_phys;
uint32_t inv_waitd_seq;	/* next wait sequence number to use */
u_int inv_waitd_gen;	/* sequence number generation (wraparounds) */
u_int inv_seq_waiters;	/* number of threads waiting on a sequence */
u_int inv_queue_full;	/* counter of queue-full events */
/* Singly-linked list of map entries pending TLB flush completion. */
struct iommu_map_entry *tlb_flush_head;
struct iommu_map_entry *tlb_flush_tail;
/* Deferred QI completion processing. */
struct task qi_task;
struct taskqueue *qi_taskqueue;
/* Per-unit MSI interrupt resources. */
struct iommu_msi_data intrs[IOMMU_MAX_MSI];
};
/* Release a map entry, optionally freeing it. */
void iommu_domain_free_entry(struct iommu_map_entry *entry, bool free);
/*
 * Shared queued-invalidation (QI) helpers.  They operate on the
 * x86_unit_common state and dispatch to the registered x86_iommu ops.
 */
void iommu_qi_emit_wait_seq(struct iommu_unit *unit, struct iommu_qi_genseq *
pseq, bool emit_wait);
void iommu_qi_wait_for_seq(struct iommu_unit *unit, const struct
iommu_qi_genseq *gseq, bool nowait);
void iommu_qi_drain_tlb_flush(struct iommu_unit *unit);
void iommu_qi_invalidate_locked(struct iommu_domain *domain,
struct iommu_map_entry *entry, bool emit_wait);
void iommu_qi_invalidate_sync(struct iommu_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, bool cansleep);
void iommu_qi_common_init(struct iommu_unit *unit, task_fn_t taskfunc);
void iommu_qi_common_fini(struct iommu_unit *unit, void (*disable_qi)(
struct iommu_unit *));
/* MSI interrupt setup/teardown for unit interrupt index idx. */
int iommu_alloc_irq(struct iommu_unit *unit, int idx);
void iommu_release_intr(struct iommu_unit *unit, int idx);
/* Per-device DMA tag and newbus property initialization. */
void iommu_device_tag_init(struct iommu_ctx *ctx, device_t dev);
void iommu_device_set_iommu_prop(device_t dev, device_t iommu);
/*
 * Page table geometry helpers: map a guest address and level within a
 * pglvl-level page table to PTE offsets, page indices, and page sizes.
 */
int pglvl_pgtbl_pte_off(int pglvl, iommu_gaddr_t base, int lvl);
vm_pindex_t pglvl_pgtbl_get_pindex(int pglvl, iommu_gaddr_t base, int lvl);
vm_pindex_t pglvl_max_pages(int pglvl);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
/* ddb(4) kernel debugger pretty-printers. */
void iommu_db_print_domain_entry(const struct iommu_map_entry *entry);
void iommu_db_print_ctx(struct iommu_ctx *ctx);
void iommu_db_domain_print_contexts(struct iommu_domain *iodom);
void iommu_db_domain_print_mappings(struct iommu_domain *iodom);
#endif