#ifndef _VM_HAT_I86_H
#define _VM_HAT_I86_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <sys/vmparam.h>
#include <sys/vm_machparam.h>
#include <sys/promif.h>
#include <vm/hat_pte.h>
#include <vm/htable.h>
#include <vm/hment.h>
/*
 * Size of the hat_copied_ptes[] array in struct hat (see below).  On bare
 * metal a full page table's worth of entries (512 8-byte PTEs) is kept;
 * under a Xen paravirtualized kernel (__xpv) only a single entry is used.
 */
#if defined(__xpv)
#define MAX_COPIED_PTES 1
#else
#define MAX_COPIED_PTES 512
#endif

/* Highest page-table level in use by the given hat (see hat_max_level). */
#define TOP_LEVEL(h) (((h)->hat_max_level))
/*
 * One hat structure exists per address space.  It anchors the page-table
 * hierarchy (hat_htable), the hash of active htables, the per-level count
 * of mapped pages, and assorted bookkeeping state.  Fields are protected
 * by hat_mutex unless noted otherwise; do not reorder fields — the layout
 * is relied on by assembly/mdb in the wider tree (NOTE(review): assumed
 * from kernel convention — confirm before changing).
 */
struct hat {
kmutex_t hat_mutex; /* protects this structure */
struct as *hat_as; /* address space this hat belongs to */
uint_t hat_stats; /* nonzero when per-hat statistics enabled; see hatstat */
pgcnt_t hat_pages_mapped[MAX_PAGE_LEVEL + 1]; /* pages mapped, per level (PGCNT_INC/DEC) */
pgcnt_t hat_ism_pgcnt; /* pages mapped via ISM segments */
cpuset_t hat_cpus; /* CPUs currently running with this hat active */
uint16_t hat_flags; /* HAT_* flag bits, defined below */
uint8_t hat_max_level; /* top page-table level for this hat (see TOP_LEVEL) */
uint_t hat_num_copied; /* number of valid entries in hat_copied_ptes[] */
htable_t *hat_htable; /* top-level htable of the page-table tree */
struct hat *hat_next; /* linkage on the global hat list (hat_list_lock) */
struct hat *hat_prev;
uint_t hat_num_hash; /* number of buckets in hat_ht_hash */
htable_t **hat_ht_hash; /* hash table of this hat's htables */
htable_t *hat_ht_cached; /* cached free htables for quick reuse */
x86pte_t hat_copied_ptes[MAX_COPIED_PTES]; /* copy of top-level user PTEs */
#if defined(__amd64) && defined(__xpv)
pfn_t hat_user_ptable; /* Xen: separate top-level page table for user mode */
#endif
};
typedef struct hat hat_t;
/*
 * Atomically adjust the count of pages mapped at the given level.
 * NOTE(review): the expansions end in ';', so these must be used as full
 * statements; an unbraced "if (c) PGCNT_INC(h, l); else ..." will not
 * compile.  Audit all call sites before removing the trailing semicolon.
 */
#define PGCNT_INC(hat, level) \
atomic_inc_ulong(&(hat)->hat_pages_mapped[level]);
#define PGCNT_DEC(hat, level) \
atomic_dec_ulong(&(hat)->hat_pages_mapped[level]);

/*
 * Values for hat_flags.  Meanings inferred from names — confirm against
 * the hat implementation before relying on them:
 */
#define HAT_FREEING (0x0001) /* hat is being torn down */
#define HAT_VICTIM (0x0002) /* hat selected as an htable-steal victim */
#define HAT_SHARED (0x0004) /* an upper-level page table is shared */
#define HAT_PINNED (0x0008) /* hat/page tables are pinned */
#define HAT_COPIED (0x0010) /* uses hat_copied_ptes[] instead of a top table */
#define HAT_COPIED_32 (0x0020) /* HAT_COPIED variant for 32-bit processes */
#define HAT_PCP (0x0040) /* hat used for per-CPU page tables */
#define HAT_PLAT_NOCACHE (0x100000) /* platform flag: out of uint16_t range; */
				    /* NOTE(review): cannot live in hat_flags */
/*
 * Global HAT-layer event counters, bumped via HATSTAT_INC() (DEBUG kernels
 * only — see below).  A single instance, hatstat, is defined in the hat
 * implementation.  Counters are not atomic; they are debug aids, not
 * exact accounting.
 */
struct hatstats {
ulong_t hs_reap_attempts; /* htable reap passes attempted */
ulong_t hs_reaped; /* htables reclaimed by the reaper */
ulong_t hs_steals; /* htables stolen from victim hats */
ulong_t hs_ptable_allocs; /* page-table page allocations */
ulong_t hs_ptable_frees; /* page-table page frees */
ulong_t hs_htable_rgets; /* htables taken from the reserve */
ulong_t hs_htable_rputs; /* htables returned to the reserve */
ulong_t hs_htable_shared; /* shared-htable attach events */
ulong_t hs_htable_unshared; /* shared-htable detach events */
ulong_t hs_hm_alloc; /* hment allocations */
ulong_t hs_hm_free; /* hment frees */
ulong_t hs_hm_put_reserve; /* hments returned to the hment reserve */
ulong_t hs_hm_get_reserve; /* hments taken from the hment reserve */
ulong_t hs_hm_steals; /* hments stolen */
ulong_t hs_hm_steal_exam; /* hments examined while stealing */
ulong_t hs_tlb_inval_delayed; /* TLB invalidations deferred */
ulong_t hs_hat_copied64; /* 64-bit hats created in HAT_COPIED mode */
ulong_t hs_hat_copied32; /* 32-bit hats created in HAT_COPIED mode */
ulong_t hs_hat_normal64; /* 64-bit hats with a normal top-level table */
};
extern struct hatstats hatstat;
/*
 * Bump a hatstat counter.  Compiled out to a harmless constant expression
 * on non-DEBUG kernels so statistics gathering costs nothing in production.
 */
#ifdef DEBUG
#define HATSTAT_INC(x) (++hatstat.x)
#else
#define HATSTAT_INC(x) (0)
#endif
#if defined(_KERNEL)

/* Round an address down to its page boundary / test for page alignment. */
#define ALIGN2PAGE(a) ((uintptr_t)(a) & MMU_PAGEMASK)
#define IS_PAGEALIGNED(a) (((uintptr_t)(a) & MMU_PAGEOFFSET) == 0)

extern uint_t khat_running; /* nonzero once the kernel hat is usable */
extern cpuset_t khat_cpuset; /* CPUs participating in kernel hat ops */
extern kmutex_t hat_list_lock; /* protects the hat_next/hat_prev list */
extern kcondvar_t hat_list_cv; /* signalled on hat list state changes */

/*
 * hat_mempte_*: set up, remap, and tear down a private kernel PTE used to
 * map one page at a time (e.g. for per-CPU copy windows).  The returned
 * hat_mempte_t is the physical address of the PTE slot.
 */
typedef paddr_t hat_mempte_t;
extern hat_mempte_t hat_mempte_setup(caddr_t addr);
extern void hat_mempte_remap(pfn_t, caddr_t, hat_mempte_t,
uint_t attr, uint_t flags);
extern void hat_mempte_release(caddr_t addr, hat_mempte_t);

extern uint_t can_steal_post_boot; /* htable stealing allowed after boot */
extern uint_t use_boot_reserve; /* still consuming boot-time reserves */

/*
 * True when allocations must come from the pre-allocated hment/htable
 * reserves: during boot, when recursing inside the hat (t_hatdepth),
 * while panicking, or while populating vmem itself.
 */
#define USE_HAT_RESERVES() \
(use_boot_reserve || curthread->t_hatdepth > 1 || \
panicstr != NULL || vmem_is_populator())
/* Per-CPU hat setup/teardown hooks invoked on CPU state changes. */
extern void hat_cpu_online(struct cpu *);
extern void hat_cpu_offline(struct cpu *);

/* Manage the per-CPU virtual address window used by ppcopy(). */
extern void setup_vaddr_for_ppcopy(struct cpu *);
extern void teardown_vaddr_for_ppcopy(struct cpu *);

/* Remove boot-loader-established mappings in the given VA range. */
extern void clear_boot_mappings(uintptr_t, uintptr_t);

/* Sentinel "virtual address" meaning demap the entire address space. */
#define DEMAP_ALL_ADDR (~(uintptr_t)0)

extern void halt(char *fmt); /* NOTE(review): takes a message, not a format */

/* Kernel hat bring-up, in boot order: size, build, then finalize. */
extern void hat_kern_alloc(caddr_t segmap_base, size_t segmap_size,
caddr_t ekernelheap);
extern void hat_kern_setup(void);
extern void hat_pte_unmap(htable_t *ht, uint_t entry, uint_t flags,
x86pte_t old_pte, void *pte_ptr, boolean_t tlb);
extern void hat_init_finish(void);

/* kpm (kernel physical mapping) translation helpers. */
extern caddr_t hat_kpm_pfn2va(pfn_t pfn);
extern pfn_t hat_kpm_va2pfn(caddr_t);
extern page_t *hat_kpm_vaddr2page(caddr_t);

extern uintptr_t hat_kernelbase(uintptr_t);
extern void hat_kmap_init(uintptr_t base, size_t len);
extern hment_t *hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry);
extern void mmu_calc_user_slots(void);
extern void hat_tlb_inval(struct hat *hat, uintptr_t va);
extern void hat_switch(struct hat *hat); /* make hat the active context */

/* Length in bytes covered by a tlb_range (tr_cnt pages at tr_level). */
#define TLB_RANGE_LEN(r) ((r)->tr_cnt << LEVEL_SHIFT((r)->tr_level))
/*
 * A contiguous run of pages to invalidate from the TLB: tr_cnt pages of
 * the size given by tr_level, starting at virtual address tr_va.
 */
typedef struct tlb_range {
uintptr_t tr_va; /* starting virtual address */
ulong_t tr_cnt; /* number of pages in the range */
int8_t tr_level; /* page-table level (page size) of the pages */
} tlb_range_t;
#if defined(__xpv)

/*
 * Xen paravirtualized kernel: block/allow domain migration around
 * operations that must not move between physical machines, and route TLB
 * page flushes through invlpg.
 */
#define XPV_DISALLOW_MIGRATE() xen_block_migrate()
#define XPV_ALLOW_MIGRATE() xen_allow_migrate()

#define mmu_flush_tlb_page(va) mmu_invlpg((caddr_t)va)
#define mmu_flush_tlb_kpage(va) mmu_invlpg((caddr_t)va)

/* Prepare/release a VA for use as a (read-only) Xen page-table mapping. */
extern void hat_prepare_mapping(hat_t *, caddr_t, uint64_t *);
extern void hat_release_mapping(hat_t *, caddr_t);

#else /* !__xpv */

/* Bare metal: migration control is a no-op. */
#define XPV_DISALLOW_MIGRATE()
#define XPV_ALLOW_MIGRATE()

#define pfn_is_foreign(pfn) __lintzero /* no foreign (other-domain) pfns */

/*
 * Scope of a TLB flush requested via mmu_flush_tlb().  FLUSH_TLB_RANGE
 * takes a tlb_range_t describing the pages to invalidate; the other types
 * ignore the range argument (presumably — confirm in the implementation).
 */
typedef enum flush_tlb_type {
FLUSH_TLB_ALL = 1, /* entire TLB, including global entries */
FLUSH_TLB_NONGLOBAL = 2, /* all non-global entries */
FLUSH_TLB_RANGE = 3, /* only the pages in the given tlb_range_t */
} flush_tlb_type_t;

extern void mmu_flush_tlb(flush_tlb_type_t, tlb_range_t *);
extern void mmu_flush_tlb_kpage(uintptr_t);
extern void mmu_flush_tlb_page(uintptr_t);

/* Install a mapping directly into a CPU's page tables at the given VA. */
extern void hati_cpu_punchin(cpu_t *cpu, uintptr_t va, uint_t attrs);

/* Hooks for deferring TLB shootdowns while a CPU is idle. */
extern void tlb_going_idle(void);
extern void tlb_service(void);

#endif /* !__xpv */
#endif
#ifdef __cplusplus
}
#endif
#endif