#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>
#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>
#include <sys/cmn_err.h>
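/*
 * Basic parameters for hat operation.
 */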
struct hat_mmu_info mmu;
static x86pte_t *pcp_page;
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
x86pte_t new);
typedef struct hat_kernel_range {
level_t hkr_level;
uintptr_t hkr_start_va;
uintptr_t hkr_end_va;
} hat_kernel_range_t;
#define NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;
uint_t use_boot_reserve = 1;
uint_t can_steal_post_boot = 0;
int enable_1gpg = 1;
int chk_optimal_1gtlb = 1;
#ifdef DEBUG
uint_t map1gcnt;
#endif
cpuset_t khat_cpuset;
kmutex_t hat_list_lock;
kcondvar_t hat_list_cv;
kmem_cache_t *hat_cache;
kmem_cache_t *hat_hash_cache;
kmem_cache_t *hat32_hash_cache;
struct hatstats hatstat;
int pt_kern;
#ifndef __xpv
extern pfn_t memseg_get_start(struct memseg *);
#endif
#define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
#define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
#define PP_ISREF(pp) PP_GETRM(pp, P_REF)
#define PP_ISRO(pp) PP_GETRM(pp, P_RO)
#define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
#define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
#define PP_SETREF(pp) PP_SETRM(pp, P_REF)
#define PP_SETRO(pp) PP_SETRM(pp, P_RO)
#define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
#define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
#define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
#define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
#define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
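/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/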
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
hat_t *hat = buf;
mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
bzero(hat->hat_pages_mapped,
sizeof (pgcnt_t) * (mmu.max_page_level + 1));
hat->hat_ism_pgcnt = 0;
hat->hat_stats = 0;
hat->hat_flags = 0;
CPUSET_ZERO(hat->hat_cpus);
hat->hat_htable = NULL;
hat->hat_ht_hash = NULL;
return (0);
}
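/*
 * Put it at the start of the global list of all hats (used by stealing).
 *
 * kas.a_hat is not in the list but is instead used to find the
 * first and last items in the list.
 */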
static void
hat_list_append(hat_t *hat)
{
mutex_enter(&hat_list_lock);
hat->hat_prev = NULL;
hat->hat_next = kas.a_hat->hat_next;
if (hat->hat_next)
hat->hat_next->hat_prev = hat;
else
kas.a_hat->hat_prev = hat;
kas.a_hat->hat_next = hat;
mutex_exit(&hat_list_lock);
}
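/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */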
hat_t *
hat_alloc(struct as *as)
{
hat_t *hat;
htable_t *ht;
uint_t use_copied;
uint_t r;
hat_kernel_range_t *rp;
uintptr_t va;
uintptr_t eva;
uint_t start;
uint_t cnt;
htable_t *src;
boolean_t use_hat32_cache;
if (can_steal_post_boot == 0)
can_steal_post_boot = 1;
ASSERT(AS_WRITE_HELD(as));
hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
hat->hat_as = as;
mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
ASSERT(hat->hat_flags == 0);
#if defined(__xpv)
use_copied = 0;
use_hat32_cache = B_FALSE;
hat->hat_max_level = mmu.max_level;
hat->hat_num_copied = 0;
hat->hat_flags = 0;
#else
if (ttoproc(curthread)->p_model == DATAMODEL_ILP32) {
use_copied = 1;
hat->hat_max_level = mmu.max_level32;
hat->hat_num_copied = mmu.num_copied_ents32;
use_hat32_cache = B_TRUE;
hat->hat_flags |= HAT_COPIED_32;
HATSTAT_INC(hs_hat_copied32);
} else if (kpti_enable == 1) {
use_copied = 1;
hat->hat_max_level = mmu.max_level;
hat->hat_num_copied = mmu.num_copied_ents;
use_hat32_cache = B_FALSE;
HATSTAT_INC(hs_hat_copied64);
} else {
use_copied = 0;
use_hat32_cache = B_FALSE;
hat->hat_max_level = mmu.max_level;
hat->hat_num_copied = 0;
hat->hat_flags = 0;
HATSTAT_INC(hs_hat_normal64);
}
#endif
if (use_copied) {
hat->hat_flags |= HAT_COPIED;
bzero(hat->hat_copied_ptes, sizeof (hat->hat_copied_ptes));
}
if (use_hat32_cache) {
hat->hat_num_hash = mmu.hat32_hash_cnt;
hat->hat_ht_hash = kmem_cache_alloc(hat32_hash_cache, KM_SLEEP);
} else {
hat->hat_num_hash = mmu.hash_cnt;
hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
}
bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
hat->hat_htable = NULL;
hat->hat_ht_cached = NULL;
XPV_DISALLOW_MIGRATE();
ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
hat->hat_htable = ht;
if (hat->hat_flags & HAT_COPIED)
goto init_done;
for (r = 0; r < num_kernel_ranges; ++r) {
rp = &kernel_ranges[r];
for (va = rp->hkr_start_va; va != rp->hkr_end_va;
va += cnt * LEVEL_SIZE(rp->hkr_level)) {
if (rp->hkr_level == TOP_LEVEL(hat))
ht = hat->hat_htable;
else
ht = htable_create(hat, va, rp->hkr_level,
NULL);
start = htable_va2entry(va, ht);
cnt = HTABLE_NUM_PTES(ht) - start;
eva = va +
((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
if (rp->hkr_end_va != 0 &&
(eva > rp->hkr_end_va || eva == 0))
cnt = htable_va2entry(rp->hkr_end_va, ht) -
start;
src = htable_lookup(kas.a_hat, va, rp->hkr_level);
ASSERT(src != NULL);
x86pte_copy(src, ht, start, cnt);
htable_release(src);
}
}
init_done:
#if defined(__xpv)
xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
XPV_ALLOW_MIGRATE();
hat_list_append(hat);
return (hat);
}
#if !defined(__xpv)
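/*
 * Allocate a minimal hat to hold the per-CPU copy of the kernel page
 * tables used for KPTI (HAT_PCP). It is never associated with an
 * address space; hat_pcp_setup() punches in the few mappings that must
 * remain visible while the user pagetables are active.
 */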
static hat_t *
hat_cpu_alloc(cpu_t *cpu)
{
hat_t *hat;
htable_t *ht;
hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
hat->hat_as = NULL;
mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
hat->hat_max_level = mmu.max_level;
hat->hat_num_copied = 0;
hat->hat_flags = HAT_PCP;
hat->hat_num_hash = mmu.hash_cnt;
hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
hat->hat_next = hat->hat_prev = NULL;
CPUSET_ADD(hat->hat_cpus, cpu->cpu_id);
hat->hat_htable = NULL;
hat->hat_ht_cached = NULL;
ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
hat->hat_htable = ht;
hat_list_append(hat);
return (hat);
}
#endif
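/*
 * process has finished executing but as has not been cleaned up yet.
 */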
void
hat_free_start(hat_t *hat)
{
ASSERT(AS_WRITE_HELD(hat->hat_as));
mutex_enter(&hat_list_lock);
while (hat->hat_flags & HAT_VICTIM)
cv_wait(&hat_list_cv, &hat_list_lock);
hat->hat_flags |= HAT_FREEING;
mutex_exit(&hat_list_lock);
}
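/*
 * An address space is being destroyed, so we destroy the associated hat.
 */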
void
hat_free_end(hat_t *hat)
{
kmem_cache_t *cache;
ASSERT(hat->hat_flags & HAT_FREEING);
ASSERT(CPU->cpu_current_hat != hat);
mutex_enter(&hat_list_lock);
if (hat->hat_prev)
hat->hat_prev->hat_next = hat->hat_next;
else
kas.a_hat->hat_next = hat->hat_next;
if (hat->hat_next)
hat->hat_next->hat_prev = hat->hat_prev;
else
kas.a_hat->hat_prev = hat->hat_prev;
mutex_exit(&hat_list_lock);
hat->hat_next = hat->hat_prev = NULL;
#if defined(__xpv)
VERIFY3U(hat->hat_flags & HAT_PCP, ==, 0);
xen_unpin(hat->hat_htable->ht_pfn);
xen_unpin(hat->hat_user_ptable);
#endif
htable_purge_hat(hat);
if (hat->hat_flags & HAT_COPIED) {
if (hat->hat_flags & HAT_COPIED_32) {
cache = hat32_hash_cache;
} else {
cache = hat_hash_cache;
}
} else {
cache = hat_hash_cache;
}
kmem_cache_free(cache, hat->hat_ht_hash);
hat->hat_ht_hash = NULL;
hat->hat_flags = 0;
hat->hat_max_level = 0;
hat->hat_num_copied = 0;
kmem_cache_free(hat_cache, hat);
}
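/*
 * Sanity check the value chosen for _userlimit: it may not fall inside
 * the VA hole.
 */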
uintptr_t
hat_kernelbase(uintptr_t va)
{
if (IN_VA_HOLE(va))
panic("_userlimit %p will fall in VA hole\n", (void *)va);
return (va);
}
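/*
 * Determine the largest page size (level) the hardware can map, working
 * around errata and cross-memnode constraints, and derive the largest
 * size exported to user mappings.
 */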
static void
set_max_page_level(void)
{
level_t lvl;
if (!kbm_largepage_support) {
lvl = 0;
} else {
if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
lvl = 2;
if (chk_optimal_1gtlb &&
cpuid_opteron_erratum(CPU, 6671130)) {
lvl = 1;
}
if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
LEVEL_SHIFT(0))) {
lvl = 1;
}
} else {
lvl = 1;
}
}
mmu.max_page_level = lvl;
if ((lvl == 2) && (enable_1gpg == 0))
mmu.umax_page_level = 1;
else
mmu.umax_page_level = lvl;
}
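/*
 * Determine how many slots in the top-most pagetable level cover user
 * addresses, i.e. how many entries a HAT_COPIED hat must copy.
 */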
void
mmu_calc_user_slots(void)
{
uint_t ent, nptes;
uintptr_t shift;
nptes = mmu.top_level_count;
shift = _userlimit >> mmu.level_shift[mmu.max_level];
ent = shift & (nptes - 1);
mmu.top_level_uslots = ent + 1;
mmu.top_level_uslots32 = 1;
mmu.num_copied_ents = mmu.top_level_uslots;
mmu.num_copied_ents32 = 4;
}
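/*
 * Initialize hat data structures based on processor MMU information.
 */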
void
mmu_init(void)
{
uint_t max_htables;
uint_t pa_bits;
uint_t va_bits;
int i;
if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
(getcr4() & CR4_PGE) != 0)
mmu.pt_global = PT_GLOBAL;
#if !defined(__xpv)
if (kpti_enable == 1)
mmu.pt_global = 0;
#endif
mmu.pae_hat = kbm_pae_support;
if (kbm_nx_support)
mmu.pt_nx = PT_NX;
else
mmu.pt_nx = 0;
cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
if ((getcr4() & CR4_LA57) != 0)
panic("5 Level paging enabled but not yet supported");
else if (va_bits > MMU_MAX4LEVELVABITS)
va_bits = MMU_MAX4LEVELVABITS;
if (va_bits < sizeof (void *) * NBBY) {
mmu.hole_start = (1ul << (va_bits - 1));
mmu.hole_end = 0ul - mmu.hole_start - 1;
} else {
mmu.hole_end = 0;
mmu.hole_start = mmu.hole_end - 1;
}
#if defined(OPTERON_ERRATUM_121)
ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
hole_start = mmu.hole_start - hole_start;
#else
hole_start = mmu.hole_start;
#endif
hole_end = mmu.hole_end;
mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
if (mmu.pae_hat == 0 && pa_bits > 32)
mmu.highest_pfn = PFN_4G - 1;
if (mmu.pae_hat) {
mmu.pte_size = 8;
mmu.pte_size_shift = 3;
} else {
mmu.pte_size = 4;
mmu.pte_size_shift = 2;
}
if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
panic("Processor does not support PAE");
if (!is_x86_feature(x86_featureset, X86FSET_CX8))
panic("Processor does not support cmpxchg8b instruction");
mmu.num_level = 4;
mmu.max_level = 3;
mmu.ptes_per_table = 512;
mmu.top_level_count = 512;
mmu.max_level32 = 2;
mmu.level_shift[0] = 12;
mmu.level_shift[1] = 21;
mmu.level_shift[2] = 30;
mmu.level_shift[3] = 39;
for (i = 0; i < mmu.num_level; ++i) {
mmu.level_size[i] = 1UL << mmu.level_shift[i];
mmu.level_offset[i] = mmu.level_size[i] - 1;
mmu.level_mask[i] = ~mmu.level_offset[i];
}
set_max_page_level();
mmu_calc_user_slots();
mmu_page_sizes = mmu.max_page_level + 1;
mmu_exported_page_sizes = mmu.umax_page_level + 1;
mmu_legacy_page_sizes =
(mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
for (i = 0; i <= mmu.max_page_level; ++i) {
mmu.pte_bits[i] = PT_VALID | pt_kern;
if (i > 0)
mmu.pte_bits[i] |= PT_PAGESIZE;
}
for (i = 1; i < mmu.num_level; ++i)
mmu.ptp_bits[i] = PT_PTPBITS;
max_htables = physmax / mmu.ptes_per_table;
mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
mmu.hash_cnt >>= 1;
mmu.hat32_hash_cnt = mmu.hash_cnt;
#define HASH_MAX_LENGTH 4
while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
mmu.hash_cnt <<= 1;
}
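/*
 * initialize hat data structures
 */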
void
hat_init(void)
{
cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
htable_init();
hment_init();
hat_cache = kmem_cache_create("hat_t",
sizeof (hat_t), 0, hati_constructor, NULL, NULL,
NULL, 0, 0);
hat_hash_cache = kmem_cache_create("HatHash",
mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
NULL, 0, 0);
if (mmu.hash_cnt == mmu.hat32_hash_cnt) {
hat32_hash_cache = hat_hash_cache;
} else {
hat32_hash_cache = kmem_cache_create("Hat32Hash",
mmu.hat32_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
NULL, NULL, 0, 0);
}
AS_LOCK_ENTER(&kas, RW_WRITER);
kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
kas.a_hat->hat_as = &kas;
kas.a_hat->hat_flags = 0;
AS_LOCK_EXIT(&kas);
CPUSET_ZERO(khat_cpuset);
CPUSET_ADD(khat_cpuset, CPU->cpu_id);
ASSERT3U(mmu.max_level, >, 0);
kas.a_hat->hat_max_level = mmu.max_level;
kas.a_hat->hat_num_copied = 0;
kas.a_hat->hat_next = NULL;
kas.a_hat->hat_prev = NULL;
kas.a_hat->hat_num_hash = mmu.hash_cnt;
kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
kas.a_hat->hat_ht_cached = NULL;
kas.a_hat->hat_htable = NULL;
hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
KM_SLEEP);
}
extern void kpti_tramp_start();
extern void kpti_tramp_end();
extern void kdi_isr_start();
extern void kdi_isr_end();
extern gate_desc_t kdi_idt[NIDT];
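/*
 * Set up the per-CPU pagetable pages (PCP) for this CPU. Under KPTI we
 * also build a separate user hat into which only the trampoline text,
 * IDT/GDT/TSS/LDT and the per-CPU KPTI frames are punched in.
 */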
static void
hat_pcp_setup(struct cpu *cpu)
{
#if !defined(__xpv)
struct hat_cpu_info *hci = cpu->cpu_hat_info;
uintptr_t va;
size_t len;
ASSERT(hci != NULL);
hci->hci_pcp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
hci->hci_pcp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
hci->hci_pcp_l3pfn =
hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l3ptes);
ASSERT3U(hci->hci_pcp_l3pfn, !=, PFN_INVALID);
bcopy(pcp_page, hci->hci_pcp_l3ptes, MMU_PAGESIZE);
hci->hci_pcp_l2pfn =
hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l2ptes);
ASSERT3U(hci->hci_pcp_l2pfn, !=, PFN_INVALID);
hci->hci_user_hat = hat_cpu_alloc(cpu);
hci->hci_user_l3pfn = hci->hci_user_hat->hat_htable->ht_pfn;
ASSERT3U(hci->hci_user_l3pfn, !=, PFN_INVALID);
hci->hci_user_l3ptes =
(x86pte_t *)hat_kpm_mapin_pfn(hci->hci_user_l3pfn);
if (kpti_enable != 1)
return;
hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_gdt, PROT_READ);
hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_idt, PROT_READ);
hati_cpu_punchin(cpu, (uintptr_t)&kdi_idt, PROT_READ);
VERIFY0((uintptr_t)&kpti_tramp_start % MMU_PAGESIZE);
VERIFY0((uintptr_t)&kpti_tramp_end % MMU_PAGESIZE);
for (va = (uintptr_t)&kpti_tramp_start;
va < (uintptr_t)&kpti_tramp_end; va += MMU_PAGESIZE) {
hati_cpu_punchin(cpu, va, PROT_READ | PROT_EXEC);
}
VERIFY3U(((uintptr_t)cpu->cpu_m.mcpu_ldt) % MMU_PAGESIZE, ==, 0);
for (va = (uintptr_t)cpu->cpu_m.mcpu_ldt, len = LDT_CPU_SIZE;
len >= MMU_PAGESIZE; va += MMU_PAGESIZE, len -= MMU_PAGESIZE) {
hati_cpu_punchin(cpu, va, PROT_READ);
}
hati_cpu_punchin(cpu, (uintptr_t)&cpu->cpu_m.mcpu_pad2[0],
PROT_READ | PROT_WRITE);
if (cpu == &cpus[0]) {
extern char dblfault_stack0[];
hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_m.mcpu_tss,
PROT_READ);
for (va = (uintptr_t)dblfault_stack0,
len = DEFAULTSTKSZ; len >= MMU_PAGESIZE;
va += MMU_PAGESIZE, len -= MMU_PAGESIZE) {
hati_cpu_punchin(cpu, va, PROT_READ | PROT_WRITE);
}
}
VERIFY0((uintptr_t)&kdi_isr_start % MMU_PAGESIZE);
VERIFY0((uintptr_t)&kdi_isr_end % MMU_PAGESIZE);
for (va = (uintptr_t)&kdi_isr_start;
va < (uintptr_t)&kdi_isr_end; va += MMU_PAGESIZE) {
hati_cpu_punchin(cpu, va, PROT_READ | PROT_EXEC);
}
#endif
}
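/*
 * Tear down the PCP state allocated by hat_pcp_setup().
 */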
static void
hat_pcp_teardown(cpu_t *cpu)
{
#if !defined(__xpv)
struct hat_cpu_info *hci;
if ((hci = cpu->cpu_hat_info) == NULL)
return;
if (hci->hci_pcp_l2ptes != NULL)
kmem_free(hci->hci_pcp_l2ptes, MMU_PAGESIZE);
if (hci->hci_pcp_l3ptes != NULL)
kmem_free(hci->hci_pcp_l3ptes, MMU_PAGESIZE);
if (hci->hci_user_hat != NULL) {
hat_free_start(hci->hci_user_hat);
hat_free_end(hci->hci_user_hat);
}
#endif
}
#define NEXT_HKR(r, l, s, e) { \
kernel_ranges[r].hkr_level = l; \
kernel_ranges[r].hkr_start_va = s; \
kernel_ranges[r].hkr_end_va = e; \
++r; \
}
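/*
 * Finish filling in the kernel hat.
 * Pre fill in all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level.
 */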
void
hat_init_finish(void)
{
size_t size;
uint_t r = 0;
uintptr_t va;
hat_kernel_range_t *rp;
use_boot_reserve = 0;
htable_adjust_reserve();
NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif
num_kernel_ranges = r;
for (r = 0; r < num_kernel_ranges; ++r) {
rp = &kernel_ranges[r];
for (va = rp->hkr_start_va; va != rp->hkr_end_va;
va += LEVEL_SIZE(rp->hkr_level)) {
htable_t *ht;
if (IN_HYPERVISOR_VA(va))
continue;
if (rp->hkr_level <= mmu.max_page_level &&
(ht = htable_getpage(kas.a_hat, va, NULL)) !=
NULL) {
htable_release(ht);
continue;
}
(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
NULL);
}
}
if (mmu.pae_hat) {
pcp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
hat_devload(kas.a_hat, (caddr_t)pcp_page, MMU_PAGESIZE,
kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
PROT_WRITE |
#endif
PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
HAT_LOAD | HAT_LOAD_NOCONSIST);
}
hat_pcp_setup(CPU);
size = segmapsize;
hat_kmap_init((uintptr_t)segmap_start, size);
#if !defined(__xpv)
ASSERT3U(kas.a_hat->hat_htable->ht_pfn, !=, PFN_INVALID);
ASSERT3U(kpti_safe_cr3, ==,
MAKECR3(kas.a_hat->hat_htable->ht_pfn, PCID_KERNEL));
#endif
}
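/*
 * Update the PCP data on the CPU cpu to the one on the hat. If this is a
 * 32-bit process, then we must update the L2 pages and then the L3. If
 * this is a 64-bit process then we must update the L3 entries.
 */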
static void
hat_pcp_update(cpu_t *cpu, const hat_t *hat)
{
ASSERT3U(hat->hat_flags & HAT_COPIED, !=, 0);
if ((hat->hat_flags & HAT_COPIED_32) != 0) {
const x86pte_t *l2src;
x86pte_t *l2dst, *l3ptes, *l3uptes;
l2src = hat->hat_copied_ptes;
l2dst = cpu->cpu_hat_info->hci_pcp_l2ptes;
l3ptes = cpu->cpu_hat_info->hci_pcp_l3ptes;
l3uptes = cpu->cpu_hat_info->hci_user_l3ptes;
l2dst[0] = l2src[0];
l2dst[1] = l2src[1];
l2dst[2] = l2src[2];
l2dst[3] = l2src[3];
bzero(l3ptes, sizeof (x86pte_t) * mmu.top_level_uslots);
l3ptes[0] = MAKEPTP(cpu->cpu_hat_info->hci_pcp_l2pfn, 2);
bzero(l3uptes, sizeof (x86pte_t) * mmu.top_level_uslots);
l3uptes[0] = MAKEPTP(cpu->cpu_hat_info->hci_pcp_l2pfn, 2);
} else {
ASSERT3S(kpti_enable, ==, 1);
bzero(cpu->cpu_hat_info->hci_pcp_l2ptes, sizeof (x86pte_t) * 4);
bcopy(hat->hat_copied_ptes, cpu->cpu_hat_info->hci_pcp_l3ptes,
sizeof (x86pte_t) * mmu.top_level_uslots);
bcopy(hat->hat_copied_ptes, cpu->cpu_hat_info->hci_user_l3ptes,
sizeof (x86pte_t) * mmu.top_level_uslots);
}
}
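/*
 * Reset a KPTI trampoline frame to carry the new kernel and user cr3
 * values, and re-arm its red zone patterns.
 */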
static void
reset_kpti(struct kpti_frame *fr, uint64_t kcr3, uint64_t ucr3)
{
ASSERT3U(fr->kf_tr_flag, ==, 0);
#ifdef DEBUG
if (fr->kf_kernel_cr3 != 0) {
ASSERT3U(fr->kf_lower_redzone, ==, 0xdeadbeefdeadbeef);
ASSERT3U(fr->kf_middle_redzone, ==, 0xdeadbeefdeadbeef);
ASSERT3U(fr->kf_upper_redzone, ==, 0xdeadbeefdeadbeef);
}
#endif
bzero(fr, offsetof(struct kpti_frame, kf_kernel_cr3));
bzero(&fr->kf_unused, sizeof (struct kpti_frame) -
offsetof(struct kpti_frame, kf_unused));
fr->kf_kernel_cr3 = kcr3;
fr->kf_user_cr3 = ucr3;
fr->kf_tr_ret_rsp = (uintptr_t)&fr->kf_tr_rsp;
fr->kf_lower_redzone = 0xdeadbeefdeadbeef;
fr->kf_middle_redzone = 0xdeadbeefdeadbeef;
fr->kf_upper_redzone = 0xdeadbeefdeadbeef;
}
#ifdef __xpv
static void
hat_switch_xen(hat_t *hat)
{
struct mmuext_op t[2];
uint_t retcnt;
uint_t opcnt = 1;
uint64_t newcr3;
ASSERT(!(hat->hat_flags & HAT_COPIED));
ASSERT(!(getcr4() & CR4_PCIDE));
newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn, PCID_NONE);
t[0].cmd = MMUEXT_NEW_BASEPTR;
t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
if (hat == kas.a_hat)
t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
else
t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
++opcnt;
if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
panic("HYPERVISOR_mmu_update() failed");
ASSERT(retcnt == opcnt);
}
#endif
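/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * With KPTI, all our HATs except kas should be using PCP. Thus, to switch
 * HATs, we need to copy over the new user PTEs, then set our trampoline
 * context as appropriate.
 */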
void
hat_switch(hat_t *hat)
{
cpu_t *cpu = CPU;
hat_t *old = cpu->cpu_current_hat;
if (old != NULL) {
if (old == hat)
return;
if (old != kas.a_hat)
CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
}
if (hat != kas.a_hat) {
CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
}
cpu->cpu_current_hat = hat;
#if defined(__xpv)
hat_switch_xen(hat);
#else
struct hat_cpu_info *info = cpu->cpu_m.mcpu_hat_info;
uint64_t pcide = getcr4() & CR4_PCIDE;
uint64_t kcr3, ucr3;
pfn_t tl_kpfn;
ulong_t flag;
EQUIV(kpti_enable, !mmu.pt_global);
if (hat->hat_flags & HAT_COPIED) {
hat_pcp_update(cpu, hat);
tl_kpfn = info->hci_pcp_l3pfn;
} else {
IMPLY(kpti_enable, hat == kas.a_hat);
tl_kpfn = hat->hat_htable->ht_pfn;
}
if (pcide) {
ASSERT(kpti_enable);
kcr3 = MAKECR3(tl_kpfn, PCID_KERNEL) | CR3_NOINVL_BIT;
ucr3 = MAKECR3(info->hci_user_l3pfn, PCID_USER) |
CR3_NOINVL_BIT;
setcr3(kcr3);
if (old != kas.a_hat)
mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
} else {
kcr3 = MAKECR3(tl_kpfn, PCID_NONE);
ucr3 = kpti_enable ?
MAKECR3(info->hci_user_l3pfn, PCID_NONE) :
0;
setcr3(kcr3);
}
if (pcide)
flag = intr_clear();
reset_kpti(&cpu->cpu_m.mcpu_kpti, kcr3, ucr3);
reset_kpti(&cpu->cpu_m.mcpu_kpti_flt, kcr3, ucr3);
reset_kpti(&cpu->cpu_m.mcpu_kpti_dbg, kcr3, ucr3);
if (pcide)
intr_restore(flag);
#endif
ASSERT(cpu == CPU);
}
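/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */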
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
x86pte_t pte;
uint_t cache_attr = attr & HAT_ORDER_MASK;
pte = MAKEPTE(pfn, level);
if (attr & PROT_WRITE)
PTE_SET(pte, PT_WRITABLE);
if (attr & PROT_USER)
PTE_SET(pte, PT_USER);
if (!(attr & PROT_EXEC))
PTE_SET(pte, mmu.pt_nx);
if (flags & HAT_LOAD_NOCONSIST)
PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
else if (attr & HAT_NOSYNC)
PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
if (cache_attr == HAT_STRICTORDER) {
PTE_SET(pte, PT_NOCACHE);
} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
/* nothing to set */;
} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
PTE_SET(pte, PT_NOCACHE);
if (is_x86_feature(x86_featureset, X86FSET_PAT))
PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
else
PTE_SET(pte, PT_WRITETHRU);
} else {
panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
}
return (pte);
}
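/*
 * Duplicate address translations of the parent to the child.  On x86
 * this is a no-op.
 */
/*ARGSUSED*/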
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
ASSERT((uintptr_t)addr < kernelbase);
ASSERT(new != kas.a_hat);
ASSERT(old != kas.a_hat);
return (0);
}
void
hat_swapin(hat_t *hat)
{
}
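/*
 * Unload all translations used by a process that is being swapped out.
 */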
void
hat_swapout(hat_t *hat)
{
uintptr_t vaddr = (uintptr_t)0;
uintptr_t eaddr = _userlimit;
htable_t *ht = NULL;
level_t l;
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
ASSERT(AS_LOCK_HELD(hat->hat_as));
if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
eaddr = (uintptr_t)hat->hat_as->a_userlimit;
while (vaddr < eaddr) {
(void) htable_walk(hat, &ht, &vaddr, eaddr);
if (ht == NULL)
break;
ASSERT(!IN_VA_HOLE(vaddr));
l = ht->ht_level;
if (ht->ht_flags & HTABLE_SHARED_PFN) {
vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
htable_release(ht);
ht = NULL;
continue;
}
if (ht->ht_lock_cnt == 0)
hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
HAT_UNLOAD_UNMAP);
if (ht->ht_lock_cnt > 0 && l == 0)
vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
else
vaddr += LEVEL_SIZE(l);
}
if (ht)
htable_release(ht);
htable_purge_hat(hat);
XPV_ALLOW_MIGRATE();
}
size_t
hat_get_mapped_size(hat_t *hat)
{
size_t total = 0;
int l;
for (l = 0; l <= mmu.max_page_level; l++)
total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
total += hat->hat_ism_pgcnt;
return (total);
}
int
hat_stats_enable(hat_t *hat)
{
atomic_inc_32(&hat->hat_stats);
return (1);
}
void
hat_stats_disable(hat_t *hat)
{
atomic_dec_32(&hat->hat_stats);
}
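/*
 * Transfer the referenced/modified bits from a PTE into the p_nrm field
 * of each page_t covered by the mapping, unless the mapping is PT_NOSYNC.
 */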
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
uint_t rm = 0;
pgcnt_t pgcnt;
if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
return;
if (PTE_GET(pte, PT_REF))
rm |= P_REF;
if (PTE_GET(pte, PT_MOD))
rm |= P_MOD;
if (rm == 0)
return;
ASSERT(x86_hm_held(pp));
pgcnt = page_get_pagecnt(level);
ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
for (; pgcnt > 0; --pgcnt) {
ASSERT(pp->p_szc >= level);
hat_page_setattr(pp, rm);
++pp;
}
}
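/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that are allowed to change on a HAT_LOAD_REMAP
 */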
#define PT_REMAP_BITS \
(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
#define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
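/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */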
static int
hati_pte_map(
htable_t *ht,
uint_t entry,
page_t *pp,
x86pte_t pte,
int flags,
void *pte_ptr)
{
hat_t *hat = ht->ht_hat;
x86pte_t old_pte;
level_t l = ht->ht_level;
hment_t *hm;
uint_t is_consist;
uint_t is_locked;
int rv = 0;
is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
if (is_locked)
HTABLE_LOCK_INC(ht);
if (is_consist) {
x86_hm_enter(pp);
hm = hment_prepare(ht, entry, pp);
}
old_pte = x86pte_set(ht, entry, pte, pte_ptr);
if (old_pte == LPAGE_ERROR) {
if (is_locked)
HTABLE_LOCK_DEC(ht);
rv = -1;
goto done;
}
if (PTE_EQUIV(pte, old_pte))
goto done;
if (!PTE_ISVALID(old_pte)) {
if (is_consist) {
hment_assign(ht, entry, pp, hm);
x86_hm_exit(pp);
} else {
ASSERT(flags & HAT_LOAD_NOCONSIST);
}
if (ht->ht_flags & HTABLE_COPIED) {
cpu_t *cpu = CPU;
hat_pcp_update(cpu, hat);
}
HTABLE_INC(ht->ht_valid_cnt);
PGCNT_INC(hat, l);
return (rv);
}
if (!PTE_ISPAGE(old_pte, l))
panic("non-null/page mapping pte=" FMT_PTE, old_pte);
if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
REMAPASSERT(flags & HAT_LOAD_REMAP);
REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
pf_is_memory(PTE2PFN(pte, l)));
REMAPASSERT(!is_consist);
}
if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
old_pte, pte);
done:
if (is_consist) {
x86_hm_exit(pp);
if (hm != NULL)
hment_free(hm);
}
return (rv);
}
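/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
 */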
static int
hati_load_common(
hat_t *hat,
uintptr_t va,
page_t *pp,
uint_t attr,
uint_t flags,
level_t level,
pfn_t pfn)
{
htable_t *ht;
uint_t entry;
x86pte_t pte;
int rv = 0;
++curthread->t_hatdepth;
ASSERT(curthread->t_hatdepth < 16);
ASSERT(hat == kas.a_hat || (hat->hat_flags & HAT_PCP) != 0 ||
AS_LOCK_HELD(hat->hat_as));
if (flags & HAT_LOAD_SHARE)
hat->hat_flags |= HAT_SHARED;
ht = htable_lookup(hat, va, level);
if (pp == NULL)
flags |= HAT_LOAD_NOCONSIST;
if (ht == NULL) {
ht = htable_create(hat, va, level, NULL);
ASSERT(ht != NULL);
}
if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) {
panic("hati_load_common: bad htable: va=%p, last page=%p, "
"ht->ht_vaddr=%p, ht->ht_level=%d", (void *)va,
(void *)HTABLE_LAST_PAGE(ht), (void *)ht->ht_vaddr,
(int)ht->ht_level);
}
entry = htable_va2entry(va, ht);
ASSERT(ht->ht_busy > 0);
ASSERT(ht->ht_level == level);
if (hat == kas.a_hat)
attr &= ~PROT_USER;
pte = hati_mkpte(pfn, attr, level, flags);
if (hat == kas.a_hat && va >= kernelbase)
PTE_SET(pte, mmu.pt_global);
rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
htable_release(ht);
--curthread->t_hatdepth;
return (rv);
}
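/*
 * special case of hat_memload to deal with some kernel addrs for performance
 */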
static void
hat_kmap_load(
caddr_t addr,
page_t *pp,
uint_t attr,
uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
x86pte_t pte;
pfn_t pfn = page_pptonum(pp);
pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
htable_t *ht;
uint_t entry;
void *pte_ptr;
attr &= ~PROT_USER;
attr |= HAT_STORECACHING_OK;
pte = hati_mkpte(pfn, attr, 0, flags);
PTE_SET(pte, mmu.pt_global);
if (mmu.pae_hat)
pte_ptr = mmu.kmap_ptes + pg_off;
else
pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
LEVEL_SHIFT(1)];
entry = htable_va2entry(va, ht);
++curthread->t_hatdepth;
ASSERT(curthread->t_hatdepth < 16);
(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
--curthread->t_hatdepth;
}
static uint_t supported_memload_flags =
HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
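/*
 * Load the given page struct at the given virtual address; flags must be
 * a subset of supported_memload_flags above.
 */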
void
hat_memload(
hat_t *hat,
caddr_t addr,
page_t *pp,
uint_t attr,
uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
level_t level = 0;
pfn_t pfn = page_pptonum(pp);
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(va));
ASSERT(hat == kas.a_hat || va < _userlimit);
ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
ASSERT((flags & supported_memload_flags) == flags);
ASSERT(!IN_VA_HOLE(va));
ASSERT(!PP_ISFREE(pp));
if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
ASSERT(hat == kas.a_hat);
hat_kmap_load(addr, pp, attr, flags);
XPV_ALLOW_MIGRATE();
return;
}
attr |= HAT_STORECACHING_OK;
if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
panic("unexpected hati_load_common() failure");
XPV_ALLOW_MIGRATE();
}
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
hat_memload(hat, addr, pp, attr, flags);
}
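/*
 * Load the given array of page structs, using large pages when possible.
 */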
void
hat_memload_array(
hat_t *hat,
caddr_t addr,
size_t len,
page_t **pages,
uint_t attr,
uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
uintptr_t eaddr = va + len;
level_t level;
size_t pgsize;
pgcnt_t pgindx = 0;
pfn_t pfn;
pgcnt_t i;
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(va));
ASSERT(hat == kas.a_hat || va + len <= _userlimit);
ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
ASSERT((flags & supported_memload_flags) == flags);
attr |= HAT_STORECACHING_OK;
while (va < eaddr) {
pfn = page_pptonum(pages[pgindx]);
for (level = mmu.max_page_level; ; --level) {
pgsize = LEVEL_SIZE(level);
if (level == 0)
break;
if (!IS_P2ALIGNED(va, pgsize) ||
(eaddr - va) < pgsize ||
!IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
continue;
if (pages[pgindx]->p_szc >= level) {
for (i = 0; i < mmu_btop(pgsize); ++i) {
if (pfn + i !=
page_pptonum(pages[pgindx + i]))
break;
ASSERT(pages[pgindx + i]->p_szc >=
level);
ASSERT(pages[pgindx] + i ==
pages[pgindx + i]);
}
if (i == mmu_btop(pgsize)) {
#ifdef DEBUG
if (level == 2)
map1gcnt++;
#endif
break;
}
}
}
ASSERT(!IN_VA_HOLE(va));
while (hati_load_common(hat, va, pages[pgindx], attr,
flags, level, pfn) != 0) {
if (level == 0)
panic("unexpected hati_load_common() failure");
--level;
pgsize = LEVEL_SIZE(level);
}
va += pgsize;
pgindx += mmu_btop(pgsize);
}
XPV_ALLOW_MIGRATE();
}
void
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
struct page **pps, uint_t attr, uint_t flags,
hat_region_cookie_t rcookie)
{
hat_memload_array(hat, addr, len, pps, attr, flags);
}
int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
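/*
 * Map a range of physical addresses (which may lack page_t's) into the
 * kernel or a user address space, using the largest page size that fits.
 */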
void
hat_devload(
hat_t *hat,
caddr_t addr,
size_t len,
pfn_t pfn,
uint_t attr,
int flags)
{
uintptr_t va = ALIGN2PAGE(addr);
uintptr_t eva = va + len;
level_t level;
size_t pgsize;
page_t *pp;
int f;
uint_t a;
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(va));
ASSERT(hat == kas.a_hat || eva <= _userlimit);
ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
ASSERT((flags & supported_devload_flags) == flags);
while (va < eva) {
for (level = mmu.max_page_level; ; --level) {
pgsize = LEVEL_SIZE(level);
if (level == 0)
break;
if (IS_P2ALIGNED(va, pgsize) &&
(eva - va) >= pgsize &&
IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
#ifdef DEBUG
if (level == 2)
map1gcnt++;
#endif
break;
}
}
a = attr;
f = flags;
if (!pf_is_memory(pfn))
f |= HAT_LOAD_NOCONSIST;
else if (!(a & HAT_PLAT_NOCACHE))
a |= HAT_STORECACHING_OK;
if (f & HAT_LOAD_NOCONSIST)
pp = NULL;
else
pp = page_numtopp_nolock(pfn);
if (pp != NULL) {
if (PP_ISFREE(pp)) {
panic("hat_devload: loading "
"a mapping to free page %p", (void *)pp);
}
if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
panic("hat_devload: loading a mapping "
"to an unlocked page %p",
(void *)pp);
}
}
ASSERT(!IN_VA_HOLE(va));
while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
if (level == 0)
panic("unexpected hati_load_common() failure");
--level;
pgsize = LEVEL_SIZE(level);
}
va += pgsize;
pfn += mmu_btop(pgsize);
}
XPV_ALLOW_MIGRATE();
}
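/*
 * Release one hardware address translation lock on the given address range.
 */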
void
hat_unlock(hat_t *hat, caddr_t addr, size_t len)
{
uintptr_t vaddr = (uintptr_t)addr;
uintptr_t eaddr = vaddr + len;
htable_t *ht = NULL;
ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
if (hat == kas.a_hat)
return;
if (eaddr > _userlimit)
panic("hat_unlock() address out of range - above _userlimit");
XPV_DISALLOW_MIGRATE();
ASSERT(AS_LOCK_HELD(hat->hat_as));
while (vaddr < eaddr) {
(void) htable_walk(hat, &ht, &vaddr, eaddr);
if (ht == NULL)
break;
ASSERT(!IN_VA_HOLE(vaddr));
if (ht->ht_lock_cnt < 1)
panic("hat_unlock(): lock_cnt < 1, "
"htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
HTABLE_LOCK_DEC(ht);
vaddr += LEVEL_SIZE(ht->ht_level);
}
if (ht)
htable_release(ht);
XPV_ALLOW_MIGRATE();
}
void
hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
hat_region_cookie_t rcookie)
{
panic("No shared region support on x86");
}
#if !defined(__xpv)
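/*
 * Cross call service function to demap a range of virtual pages on
 * the current CPU or flush all mappings in TLB.
 */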
static int
hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
_NOTE(ARGUNUSED(a3));
hat_t *hat = (hat_t *)a1;
tlb_range_t *range = (tlb_range_t *)a2;
if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
return (0);
if (range->tr_va != DEMAP_ALL_ADDR) {
mmu_flush_tlb(FLUSH_TLB_RANGE, range);
return (0);
}
if (hat->hat_flags & HAT_COPIED) {
hat_pcp_update(CPU, hat);
}
mmu_flush_tlb(FLUSH_TLB_NONGLOBAL, NULL);
return (0);
}
#define TLBIDLE_CPU_HALTED (0x1UL)
#define TLBIDLE_INVAL_ALL (0x2UL)
#define CAS_TLB_INFO(cpu, old, new) \
atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
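/*
 * Record that a CPU is going idle. A remote CPU can then mark the idle
 * CPU's TLB as needing a full flush (TLBIDLE_INVAL_ALL) rather than
 * interrupting it; tlb_service() performs the deferred flush on wakeup.
 */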
void
tlb_going_idle(void)
{
atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info,
TLBIDLE_CPU_HALTED);
}
void
tlb_service(void)
{
ulong_t tlb_info;
ulong_t found;
tlb_info = CPU->cpu_m.mcpu_tlb_info;
if (tlb_info & TLBIDLE_CPU_HALTED) {
ASSERT(CPU->cpu_current_hat == kas.a_hat);
while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
ASSERT(found & TLBIDLE_CPU_HALTED);
tlb_info = found;
SMT_PAUSE();
}
if (tlb_info & TLBIDLE_INVAL_ALL)
mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
}
}
#endif
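/*
 * Internal routine to do cross calls to invalidate a range of pages on
 * all CPUs using a given hat.
 */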
void
hat_tlb_inval_range(hat_t *hat, tlb_range_t *in_range)
{
extern int flushes_require_xcalls;
cpuset_t justme;
cpuset_t cpus_to_shootdown;
tlb_range_t range = *in_range;
#ifndef __xpv
cpuset_t check_cpus;
cpu_t *cpup;
int c;
#endif
if (hat->hat_flags & HAT_FREEING)
return;
if (hat->hat_flags & HAT_SHARED) {
hat = kas.a_hat;
range.tr_va = DEMAP_ALL_ADDR;
}
if (panicstr || !flushes_require_xcalls) {
#ifdef __xpv
if (range.tr_va == DEMAP_ALL_ADDR) {
xen_flush_tlb();
} else {
for (size_t i = 0; i < TLB_RANGE_LEN(&range);
i += MMU_PAGESIZE) {
xen_flush_va((caddr_t)(range.tr_va + i));
}
}
#else
(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)&range, 0);
#endif
return;
}
kpreempt_disable();
CPUSET_ONLY(justme, CPU->cpu_id);
if (hat == kas.a_hat)
cpus_to_shootdown = khat_cpuset;
else
cpus_to_shootdown = hat->hat_cpus;
#ifndef __xpv
check_cpus = cpus_to_shootdown;
for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
ulong_t tlb_info;
if (!CPU_IN_SET(check_cpus, c))
continue;
CPUSET_DEL(check_cpus, c);
cpup = cpu[c];
if (cpup == NULL)
continue;
tlb_info = cpup->cpu_m.mcpu_tlb_info;
while (tlb_info == TLBIDLE_CPU_HALTED) {
(void) CAS_TLB_INFO(cpup, TLBIDLE_CPU_HALTED,
TLBIDLE_CPU_HALTED | TLBIDLE_INVAL_ALL);
SMT_PAUSE();
tlb_info = cpup->cpu_m.mcpu_tlb_info;
}
if (tlb_info == (TLBIDLE_CPU_HALTED | TLBIDLE_INVAL_ALL)) {
HATSTAT_INC(hs_tlb_inval_delayed);
CPUSET_DEL(cpus_to_shootdown, c);
}
}
#endif
if (CPUSET_ISNULL(cpus_to_shootdown) ||
CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
#ifdef __xpv
if (range.tr_va == DEMAP_ALL_ADDR) {
xen_flush_tlb();
} else {
for (size_t i = 0; i < TLB_RANGE_LEN(&range);
i += MMU_PAGESIZE) {
xen_flush_va((caddr_t)(range.tr_va + i));
}
}
#else
(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)&range, 0);
#endif
} else {
CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
#ifdef __xpv
if (range.tr_va == DEMAP_ALL_ADDR) {
xen_gflush_tlb(cpus_to_shootdown);
} else {
for (size_t i = 0; i < TLB_RANGE_LEN(&range);
i += MMU_PAGESIZE) {
xen_gflush_va((caddr_t)(range.tr_va + i),
cpus_to_shootdown);
}
}
#else
xc_call((xc_arg_t)hat, (xc_arg_t)&range, 0,
CPUSET2BV(cpus_to_shootdown), hati_demap_func);
#endif
}
kpreempt_enable();
}
void
hat_tlb_inval(hat_t *hat, uintptr_t va)
{
tlb_range_t range;
range.tr_va = va;
range.tr_cnt = 1;
range.tr_level = MIN_PAGE_LEVEL;
hat_tlb_inval_range(hat, &range);
}
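/*
 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
 * handle releasing of the htables.
 */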
void
hat_pte_unmap(
htable_t *ht,
uint_t entry,
uint_t flags,
x86pte_t old_pte,
void *pte_ptr,
boolean_t tlb)
{
hat_t *hat = ht->ht_hat;
hment_t *hm = NULL;
page_t *pp = NULL;
level_t l = ht->ht_level;
pfn_t pfn;
if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
ASSERT(ht->ht_lock_cnt > 0);
HTABLE_LOCK_DEC(ht);
}
ASSERT(ht->ht_busy > 0);
while (PTE_ISVALID(old_pte)) {
pfn = PTE2PFN(old_pte, l);
if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
pp = NULL;
} else {
#ifdef __xpv
if (pfn == PFN_INVALID)
panic("Invalid PFN, but not PT_NOCONSIST");
#endif
pp = page_numtopp_nolock(pfn);
if (pp == NULL) {
panic("no page_t, not NOCONSIST: old_pte="
FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
old_pte, (uintptr_t)ht, entry,
(uintptr_t)pte_ptr);
}
x86_hm_enter(pp);
}
old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
break;
if (pp != NULL) {
x86_hm_exit(pp);
pp = NULL;
} else {
ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
}
}
if (!PTE_ISVALID(old_pte)) {
if (pp != NULL)
x86_hm_exit(pp);
return;
}
if (pp != NULL) {
if (!(flags & HAT_UNLOAD_NOSYNC))
hati_sync_pte_to_page(pp, old_pte, l);
hm = hment_remove(pp, ht, entry);
x86_hm_exit(pp);
if (hm != NULL)
hment_free(hm);
}
ASSERT(ht->ht_valid_cnt > 0);
HTABLE_DEC(ht->ht_valid_cnt);
PGCNT_DEC(hat, l);
}
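/*
 * special case of hat_unload to handle some kernel addresses
 */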
static void
hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
uintptr_t eva = va + len;
pgcnt_t pg_index;
htable_t *ht;
uint_t entry;
x86pte_t *pte_ptr;
x86pte_t old_pte;
for (; va < eva; va += MMU_PAGESIZE) {
pg_index = mmu_btop(va - mmu.kmap_addr);
pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
old_pte = GET_PTE(pte_ptr);
ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
>> LEVEL_SHIFT(1)];
entry = htable_va2entry(va, ht);
hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
}
}
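/*
 * unload a range of virtual address space (no callback)
 */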
void
hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
XPV_DISALLOW_MIGRATE();
ASSERT(hat == kas.a_hat || va + len <= _userlimit);
if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
ASSERT(hat == kas.a_hat);
hat_kmap_unload(addr, len, flags);
} else {
hat_unload_callback(hat, addr, len, flags, NULL);
}
XPV_ALLOW_MIGRATE();
}
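/*
 * Invalidate the TLB, and perform the callback to the upper level VM
 * system, for the specified ranges of contiguous pages.
 */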
static void
handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, tlb_range_t *range)
{
while (cnt > 0) {
--cnt;
hat_tlb_inval_range(hat, &range[cnt]);
if (cb != NULL) {
cb->hcb_start_addr = (caddr_t)range[cnt].tr_va;
cb->hcb_end_addr = cb->hcb_start_addr;
cb->hcb_end_addr += range[cnt].tr_cnt <<
LEVEL_SHIFT(range[cnt].tr_level);
cb->hcb_function(cb);
}
}
}
#define MAX_UNLOAD_CNT (8)
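/*
 * Unload a given range of addresses (has optional callback)
 */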
void
hat_unload_callback(
hat_t *hat,
caddr_t addr,
size_t len,
uint_t flags,
hat_callback_t *cb)
{
uintptr_t vaddr = (uintptr_t)addr;
uintptr_t eaddr = vaddr + len;
htable_t *ht = NULL;
uint_t entry;
uintptr_t contig_va = (uintptr_t)-1L;
tlb_range_t r[MAX_UNLOAD_CNT];
uint_t r_cnt = 0;
x86pte_t old_pte;
XPV_DISALLOW_MIGRATE();
ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
if (cb == NULL && len == MMU_PAGESIZE) {
ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
if (ht != NULL) {
if (PTE_ISVALID(old_pte)) {
hat_pte_unmap(ht, entry, flags, old_pte,
NULL, B_TRUE);
}
htable_release(ht);
}
XPV_ALLOW_MIGRATE();
return;
}
while (vaddr < eaddr) {
old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
if (ht == NULL)
break;
ASSERT(!IN_VA_HOLE(vaddr));
if (vaddr < (uintptr_t)addr)
panic("hat_unload_callback(): unmap inside large page");
if (vaddr != contig_va ||
(r_cnt > 0 && r[r_cnt - 1].tr_level != ht->ht_level)) {
if (r_cnt == MAX_UNLOAD_CNT) {
handle_ranges(hat, cb, r_cnt, r);
r_cnt = 0;
}
r[r_cnt].tr_va = vaddr;
r[r_cnt].tr_cnt = 0;
r[r_cnt].tr_level = ht->ht_level;
++r_cnt;
}
entry = htable_va2entry(vaddr, ht);
hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
ASSERT(ht->ht_level <= mmu.max_page_level);
vaddr += LEVEL_SIZE(ht->ht_level);
contig_va = vaddr;
++r[r_cnt - 1].tr_cnt;
}
if (ht)
htable_release(ht);
if (r_cnt > 0)
handle_ranges(hat, cb, r_cnt, r);
XPV_ALLOW_MIGRATE();
}
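/*
 * Flush the TLB entries covering the given range on the current CPU,
 * falling back to a full flush if the page size can't be determined.
 */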
void
hat_flush_range(hat_t *hat, caddr_t va, size_t size)
{
ssize_t sz;
caddr_t endva = va + size;
while (va < endva) {
sz = hat_getpagesize(hat, va);
if (sz < 0) {
#ifdef __xpv
xen_flush_tlb();
#else
mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
#endif
break;
}
#ifdef __xpv
xen_flush_va(va);
#else
mmu_flush_tlb_kpage((uintptr_t)va);
#endif
va += sz;
}
}
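/*
 * synchronize mapping with software data structures
 *
 * This interface is currently only used by the working set monitor
 * driver.
 */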
void
hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
uintptr_t vaddr = (uintptr_t)addr;
uintptr_t eaddr = vaddr + len;
htable_t *ht = NULL;
uint_t entry;
x86pte_t pte;
x86pte_t save_pte;
x86pte_t new;
page_t *pp;
ASSERT(!IN_VA_HOLE(vaddr));
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
XPV_DISALLOW_MIGRATE();
for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
pte = htable_walk(hat, &ht, &vaddr, eaddr);
if (ht == NULL)
break;
entry = htable_va2entry(vaddr, ht);
if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
PTE_GET(pte, PT_REF | PT_MOD) == 0)
continue;
pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
if (pp == NULL)
break;
x86_hm_enter(pp);
save_pte = pte;
pte = x86pte_get(ht, entry);
if (pte != save_pte) {
x86_hm_exit(pp);
goto try_again;
}
if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
PTE_GET(pte, PT_REF | PT_MOD) == 0) {
x86_hm_exit(pp);
continue;
}
if (flags == HAT_SYNC_ZERORM) {
new = pte;
PTE_CLR(new, PT_REF | PT_MOD);
pte = hati_update_pte(ht, entry, pte, new);
if (pte != 0) {
x86_hm_exit(pp);
goto try_again;
}
} else {
hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
}
x86_hm_exit(pp);
}
if (ht)
htable_release(ht);
XPV_ALLOW_MIGRATE();
}
void
hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
{
}
uint_t
hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
{
uintptr_t vaddr = ALIGN2PAGE(addr);
htable_t *ht = NULL;
x86pte_t pte;
ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
if (IN_VA_HOLE(vaddr))
return ((uint_t)-1);
ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
if (ht == NULL)
return ((uint_t)-1);
if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
htable_release(ht);
return ((uint_t)-1);
}
*attr = PROT_READ;
if (PTE_GET(pte, PT_WRITABLE))
*attr |= PROT_WRITE;
if (PTE_GET(pte, PT_USER))
*attr |= PROT_USER;
if (!PTE_GET(pte, mmu.pt_nx))
*attr |= PROT_EXEC;
if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
*attr |= HAT_NOSYNC;
htable_release(ht);
return (0);
}
#define HAT_LOAD_ATTR 1
#define HAT_SET_ATTR 2
#define HAT_CLR_ATTR 3
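/*
 * Common code for hat_setattr(), hat_clrattr() and hat_chgattr(): walk
 * the existing mappings in a range and apply the requested change.
 */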
static void
hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
{
uintptr_t vaddr = (uintptr_t)addr;
uintptr_t eaddr = (uintptr_t)addr + len;
htable_t *ht = NULL;
uint_t entry;
x86pte_t oldpte, newpte;
page_t *pp;
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
try_again:
oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
if (ht == NULL)
break;
if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
continue;
pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
if (pp == NULL)
continue;
x86_hm_enter(pp);
newpte = oldpte;
if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
if ((attr & PROT_WRITE) &&
!PTE_GET(oldpte, PT_WRITABLE))
newpte |= PT_WRITABLE;
if ((attr & HAT_NOSYNC) &&
PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
newpte |= PT_NOSYNC;
if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
newpte &= ~mmu.pt_nx;
}
if (what == HAT_LOAD_ATTR) {
if (!(attr & PROT_WRITE) &&
PTE_GET(oldpte, PT_WRITABLE))
newpte &= ~PT_WRITABLE;
if (!(attr & HAT_NOSYNC) &&
PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
newpte &= ~PT_SOFTWARE;
if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
newpte |= mmu.pt_nx;
}
if (what == HAT_CLR_ATTR) {
if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
newpte &= ~PT_WRITABLE;
if ((attr & HAT_NOSYNC) &&
PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
newpte &= ~PT_SOFTWARE;
if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
newpte |= mmu.pt_nx;
}
if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
newpte |= PT_REF | PT_MOD;
if (newpte != oldpte) {
entry = htable_va2entry(vaddr, ht);
oldpte = hati_update_pte(ht, entry, oldpte, newpte);
if (oldpte != 0) {
x86_hm_exit(pp);
goto try_again;
}
}
x86_hm_exit(pp);
}
if (ht)
htable_release(ht);
XPV_ALLOW_MIGRATE();
}
void
hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
}
void
hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
}
void
hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
{
ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
}
void
hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
{
ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
}
ssize_t
hat_getpagesize(hat_t *hat, caddr_t addr)
{
uintptr_t vaddr = ALIGN2PAGE(addr);
htable_t *ht;
size_t pagesize;
ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
if (IN_VA_HOLE(vaddr))
return (-1);
ht = htable_getpage(hat, vaddr, NULL);
if (ht == NULL)
return (-1);
pagesize = LEVEL_SIZE(ht->ht_level);
htable_release(ht);
return (pagesize);
}
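/*
 * return the pfn associated with the given address, or PFN_INVALID if
 * there is no mapping.
 */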
pfn_t
hat_getpfnum(hat_t *hat, caddr_t addr)
{
uintptr_t vaddr = ALIGN2PAGE(addr);
htable_t *ht;
uint_t entry;
pfn_t pfn = PFN_INVALID;
ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
if (khat_running == 0)
return (PFN_INVALID);
if (IN_VA_HOLE(vaddr))
return (PFN_INVALID);
XPV_DISALLOW_MIGRATE();
if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
x86pte_t pte;
pgcnt_t pg_index;
pg_index = mmu_btop(vaddr - mmu.kmap_addr);
pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
if (PTE_ISVALID(pte))
pfn = PTE2PFN(pte, 0);
XPV_ALLOW_MIGRATE();
return (pfn);
}
ht = htable_getpage(hat, vaddr, &entry);
if (ht == NULL) {
XPV_ALLOW_MIGRATE();
return (PFN_INVALID);
}
ASSERT(vaddr >= ht->ht_vaddr);
ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
if (ht->ht_level > 0)
pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
htable_release(ht);
XPV_ALLOW_MIGRATE();
return (pfn);
}
int
hat_probe(hat_t *hat, caddr_t addr)
{
uintptr_t vaddr = ALIGN2PAGE(addr);
uint_t entry;
htable_t *ht;
pgcnt_t pg_off;
ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
if (IN_VA_HOLE(vaddr))
return (0);
if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
pg_off = mmu_btop(vaddr - mmu.kmap_addr);
if (mmu.pae_hat)
return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
else
return (PTE_ISVALID(
((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
}
ht = htable_getpage(hat, vaddr, &entry);
htable_release(ht);
return (ht != NULL);
}
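/*
 * Return 1 if the shared memory segment containing va is pageable DISM,
 * 0 if it is regular locked-down ISM.
 */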
static int
is_it_dism(hat_t *hat, caddr_t va)
{
struct seg *seg;
struct shm_data *shmd;
struct spt_data *sptd;
seg = as_findseg(hat->hat_as, va, 0);
ASSERT(seg != NULL);
ASSERT(seg->s_base <= va);
shmd = (struct shm_data *)seg->s_data;
ASSERT(shmd != NULL);
sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
ASSERT(sptd != NULL);
if (sptd->spt_flags & SHM_PAGEABLE)
return (1);
return (0);
}
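/*
 * Map a pagetable (or, for DISM, the individual pages) of the ism_hat
 * into this hat for ISM shared memory.
 */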
int
hat_share(
hat_t *hat,
caddr_t addr,
hat_t *ism_hat,
caddr_t src_addr,
size_t len,
uint_t ismszc)
{
uintptr_t vaddr_start = (uintptr_t)addr;
uintptr_t vaddr;
uintptr_t eaddr = vaddr_start + len;
uintptr_t ism_addr_start = (uintptr_t)src_addr;
uintptr_t ism_addr = ism_addr_start;
uintptr_t e_ism_addr = ism_addr + len;
htable_t *ism_ht = NULL;
htable_t *ht;
x86pte_t pte;
page_t *pp;
pfn_t pfn;
level_t l;
pgcnt_t pgcnt;
uint_t prot;
int is_dism;
int flags;
ASSERT(hat != kas.a_hat);
ASSERT(eaddr <= _userlimit);
if (!(ism_hat->hat_flags & HAT_SHARED)) {
ASSERT(hat_get_mapped_size(ism_hat) == 0);
return (0);
}
XPV_DISALLOW_MIGRATE();
ASSERT(IS_PAGEALIGNED(vaddr_start));
ASSERT(IS_PAGEALIGNED(ism_addr_start));
ASSERT(ism_hat->hat_flags & HAT_SHARED);
is_dism = is_it_dism(hat, addr);
while (ism_addr < e_ism_addr) {
pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
if (ism_ht == NULL)
break;
l = ism_ht->ht_level;
vaddr = vaddr_start + (ism_addr - ism_addr_start);
ht = htable_lookup(hat, vaddr, l);
if (ht != NULL) {
if (ht->ht_flags & HTABLE_SHARED_PFN)
goto shared;
htable_release(ht);
goto not_shared;
}
if (l == mmu.max_level)
goto not_shared;
if (is_dism && l > 0)
goto not_shared;
if (ism_addr != ism_ht->ht_vaddr ||
(vaddr & LEVEL_OFFSET(l + 1)) != 0)
goto not_shared;
if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
goto not_shared;
if (l > 0) {
int e;
for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
x86pte_t pte;
pte = x86pte_get(ism_ht, e);
if (!PTE_ISPAGE(pte, l))
goto not_shared;
}
}
ht = htable_create(hat, vaddr, l, ism_ht);
shared:
ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
ASSERT(ht->ht_shares == ism_ht);
hat->hat_ism_pgcnt +=
(ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
(LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
htable_release(ht);
ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
htable_release(ism_ht);
ism_ht = NULL;
continue;
not_shared:
for (l = ism_ht->ht_level; l > 0; --l) {
if (LEVEL_SIZE(l) <= eaddr - vaddr &&
(vaddr & LEVEL_OFFSET(l)) == 0)
break;
}
if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
} else {
pgcnt = mmu_btop(eaddr - vaddr);
l = 0;
}
pfn = PTE2PFN(pte, ism_ht->ht_level);
ASSERT(pfn != PFN_INVALID);
while (pgcnt > 0) {
pp = page_numtopp_nolock(pfn);
ASSERT(pp != NULL);
prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
if (PTE_GET(pte, PT_WRITABLE))
prot |= PROT_WRITE;
if (!PTE_GET(pte, PT_NX))
prot |= PROT_EXEC;
flags = HAT_LOAD;
if (!is_dism)
flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
while (hati_load_common(hat, vaddr, pp, prot, flags,
l, pfn) != 0) {
if (l == 0)
panic("hati_load_common() failure");
--l;
}
vaddr += LEVEL_SIZE(l);
ism_addr += LEVEL_SIZE(l);
pfn += mmu_btop(LEVEL_SIZE(l));
pgcnt -= mmu_btop(LEVEL_SIZE(l));
}
}
if (ism_ht != NULL)
htable_release(ism_ht);
XPV_ALLOW_MIGRATE();
return (0);
}
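/*
 * hat_unshare() is similar to hat_unload_callback(), but
 * we have to look for empty shared pagetables. Note that
 * hat_unshare() is always invoked against an entire segment.
 */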
void
hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
{
uint64_t vaddr = (uintptr_t)addr;
uintptr_t eaddr = vaddr + len;
htable_t *ht = NULL;
uint_t need_demaps = 0;
int flags = HAT_UNLOAD_UNMAP;
level_t l;
ASSERT(hat != kas.a_hat);
ASSERT(eaddr <= _userlimit);
ASSERT(IS_PAGEALIGNED(vaddr));
ASSERT(IS_PAGEALIGNED(eaddr));
XPV_DISALLOW_MIGRATE();
l = mmu.max_page_level;
if (l == mmu.max_level)
--l;
for (; l >= 0; --l) {
for (vaddr = (uintptr_t)addr; vaddr < eaddr;
vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
ASSERT(!IN_VA_HOLE(vaddr));
ht = htable_lookup(hat, vaddr, l);
if (ht == NULL)
continue;
if (ht->ht_flags & HTABLE_SHARED_PFN) {
hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
(LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
ht->ht_valid_cnt = 0;
need_demaps = 1;
}
htable_release(ht);
}
}
if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
hat_tlb_inval(hat, DEMAP_ALL_ADDR);
if (!is_it_dism(hat, addr))
flags |= HAT_UNLOAD_UNLOCK;
hat_unload(hat, addr, len, flags);
XPV_ALLOW_MIGRATE();
}
void
hat_reserve(struct as *as, caddr_t addr, size_t len)
{
}
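/*
 * Called when all mappings to a page should have write permission removed.
 * Mostly stolen from hat_pagesync()
 */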
static void
hati_page_clrwrt(struct page *pp)
{
hment_t *hm = NULL;
htable_t *ht;
uint_t entry;
x86pte_t old;
x86pte_t new;
uint_t pszc = 0;
XPV_DISALLOW_MIGRATE();
next_size:
x86_hm_enter(pp);
while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
if (ht->ht_level < pszc)
continue;
old = x86pte_get(ht, entry);
for (;;) {
if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
PTE_GET(old, PT_WRITABLE) == 0)
break;
new = old;
PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
old = hati_update_pte(ht, entry, old, new);
if (old != 0)
continue;
break;
}
}
x86_hm_exit(pp);
while (pszc < pp->p_szc) {
page_t *tpp;
pszc++;
tpp = PP_GROUPLEADER(pp, pszc);
if (pp != tpp) {
pp = tpp;
goto next_size;
}
}
XPV_ALLOW_MIGRATE();
}
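/*
 * Set ref/mod (and P_RO) bits in the page struct. A newly modified page
 * on a VMODSORT vnode is also moved to the dirty end of the vnode's page
 * list, unless P_NSH suppresses the shuffle.
 */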
void
hat_page_setattr(struct page *pp, uint_t flag)
{
vnode_t *vp = pp->p_vnode;
kmutex_t *vphm = NULL;
page_t **listp;
int noshuffle;
noshuffle = flag & P_NSH;
flag &= ~P_NSH;
if (PP_GETRM(pp, flag) == flag)
return;
if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
!noshuffle) {
vphm = page_vnode_mutex(vp);
mutex_enter(vphm);
}
PP_SETRM(pp, flag);
if (vphm != NULL) {
if (pp->p_vpnext != pp) {
page_vpsub(&vp->v_pages, pp);
if (vp->v_pages != NULL)
listp = &vp->v_pages->p_vpprev->p_vpnext;
else
listp = &vp->v_pages;
page_vpadd(listp, pp);
}
mutex_exit(vphm);
}
}
void
hat_page_clrattr(struct page *pp, uint_t flag)
{
vnode_t *vp = pp->p_vnode;
ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
PP_CLRRM(pp, flag);
if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
hati_page_clrwrt(pp);
}
}
uint_t
hat_page_getattr(struct page *pp, uint_t flag)
{
return (PP_GETRM(pp, flag));
}
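/*
 * Unmap a single page table entry found via an hment and sync its
 * ref/mod bits back to the page. Returns the hment for the caller to free.
 */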
hment_t *
hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
{
x86pte_t old_pte;
pfn_t pfn = pp->p_pagenum;
hment_t *hm;
htable_acquire(ht);
old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
panic("x86pte_inval() failure found PTE = " FMT_PTE
" pfn being unmapped is %lx ht=0x%lx entry=0x%x",
old_pte, pfn, (uintptr_t)ht, entry);
}
ASSERT(ht->ht_valid_cnt > 0);
HTABLE_DEC(ht->ht_valid_cnt);
PGCNT_DEC(ht->ht_hat, ht->ht_level);
if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
hm = hment_remove(pp, ht, entry);
x86_hm_exit(pp);
htable_release(ht);
return (hm);
}
extern int vpm_enable;
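/*
 * Unload translations to a page. If the page is a subpage of a large
 * page, the large mappings are also removed.  The forceflag is unused.
 */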
static int
hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
{
page_t *cur_pp = pp;
hment_t *hm;
hment_t *prev;
htable_t *ht;
uint_t entry;
level_t level;
XPV_DISALLOW_MIGRATE();
++curthread->t_hatdepth;
ASSERT(curthread->t_hatdepth < 16);
if (vpm_enable) {
pp->p_vpmref = 0;
}
next_size:
for (;;) {
x86_hm_enter(cur_pp);
for (prev = NULL; ; prev = hm) {
hm = hment_walk(cur_pp, &ht, &entry, prev);
if (hm == NULL) {
x86_hm_exit(cur_pp);
if (cur_pp->p_szc <= pg_szcd) {
ASSERT(curthread->t_hatdepth > 0);
--curthread->t_hatdepth;
XPV_ALLOW_MIGRATE();
return (0);
}
++pg_szcd;
cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
goto next_size;
}
level = ht->ht_level;
if (level == pg_szcd)
break;
}
hm = hati_page_unmap(cur_pp, ht, entry);
if (hm != NULL)
hment_free(hm);
}
}
int
hat_pageunload(struct page *pp, uint_t forceflag)
{
ASSERT(PAGE_EXCL(pp));
return (hati_pageunload(pp, 0, forceflag));
}
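/*
 * Unload all large mappings to pp and reduce by 1 p_szc field of every large
 * page level that included pp.
 *
 * pp must be locked EXCL. Even though no other constituent pages are locked
 * it's legal to unload large mappings to pp because all constituent pages of
 * large locked mappings have to be locked SHARED.
 */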
void
hat_page_demote(page_t *pp)
{
uint_t pszc;
uint_t rszc;
uint_t szc;
page_t *rootpp;
page_t *firstpp;
page_t *lastpp;
pgcnt_t pgcnt;
ASSERT(PAGE_EXCL(pp));
ASSERT(!PP_ISFREE(pp));
ASSERT(page_szc_lock_assert(pp));
if (pp->p_szc == 0)
return;
rootpp = PP_GROUPLEADER(pp, 1);
(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
again:
pszc = pp->p_szc;
if (pszc == 0)
return;
rootpp = PP_GROUPLEADER(pp, pszc);
x86_hm_enter(rootpp);
if ((rszc = rootpp->p_szc) != pszc) {
x86_hm_exit(rootpp);
if (rszc > pszc) {
ASSERT(pp != rootpp);
rootpp = PP_GROUPLEADER(rootpp, rszc);
x86_hm_enter(rootpp);
x86_hm_exit(rootpp);
}
goto again;
}
ASSERT(pp->p_szc == pszc);
szc = pszc;
while (szc > 1) {
lastpp = PP_GROUPLEADER(pp, szc);
pgcnt = page_get_pagecnt(szc);
lastpp += pgcnt - 1;
firstpp = PP_GROUPLEADER(pp, (szc - 1));
pgcnt = page_get_pagecnt(szc - 1);
if (lastpp - firstpp < pgcnt) {
szc--;
continue;
}
firstpp += pgcnt;
while (lastpp != firstpp) {
ASSERT(lastpp->p_szc == pszc);
lastpp->p_szc = szc - 1;
lastpp--;
}
firstpp->p_szc = szc - 1;
szc--;
}
szc = 0;
while (szc < pszc) {
firstpp = PP_GROUPLEADER(pp, (szc + 1));
if (szc == 0) {
pgcnt = page_get_pagecnt(1);
lastpp = firstpp + (pgcnt - 1);
} else {
lastpp = PP_GROUPLEADER(pp, szc);
if (firstpp == lastpp) {
szc++;
continue;
}
lastpp--;
pgcnt = page_get_pagecnt(szc);
}
while (lastpp != firstpp) {
ASSERT(lastpp->p_szc == pszc);
lastpp->p_szc = szc;
lastpp--;
}
firstpp->p_szc = szc;
if (firstpp == rootpp)
break;
szc++;
}
x86_hm_exit(rootpp);
}
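/*
 * Sync the referenced and modified bits of the hardware PTEs into the
 * page struct, optionally clearing them (HAT_SYNC_ZERORM), and return
 * the page's current ref/mod/ro state.
 */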
uint_t
hat_pagesync(struct page *pp, uint_t flags)
{
hment_t *hm = NULL;
htable_t *ht;
uint_t entry;
x86pte_t old, save_old;
x86pte_t new;
uchar_t nrmbits = P_REF|P_MOD|P_RO;
extern ulong_t po_share;
page_t *save_pp = pp;
uint_t pszc = 0;
ASSERT(PAGE_LOCKED(pp) || panicstr);
if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
return (pp->p_nrm & nrmbits);
if ((flags & HAT_SYNC_ZERORM) == 0) {
if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
return (pp->p_nrm & nrmbits);
if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
return (pp->p_nrm & nrmbits);
if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
hat_page_getshare(pp) > po_share) {
if (PP_ISRO(pp))
PP_SETREF(pp);
return (pp->p_nrm & nrmbits);
}
}
XPV_DISALLOW_MIGRATE();
next_size:
x86_hm_enter(pp);
while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
if (ht->ht_level < pszc)
continue;
old = x86pte_get(ht, entry);
try_again:
ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
if (PTE_GET(old, PT_REF | PT_MOD) == 0)
continue;
save_old = old;
if ((flags & HAT_SYNC_ZERORM) != 0) {
new = old;
PTE_CLR(new, PT_REF | PT_MOD);
old = hati_update_pte(ht, entry, old, new);
if (old != 0)
goto try_again;
old = save_old;
}
if (!(flags & HAT_SYNC_ZERORM) &&
PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
hati_sync_pte_to_page(pp, old, ht->ht_level);
if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
x86_hm_exit(pp);
goto done;
}
}
x86_hm_exit(pp);
while (pszc < pp->p_szc) {
page_t *tpp;
pszc++;
tpp = PP_GROUPLEADER(pp, pszc);
if (pp != tpp) {
pp = tpp;
goto next_size;
}
}
done:
XPV_ALLOW_MIGRATE();
return (save_pp->p_nrm & nrmbits);
}
ulong_t
hat_page_getshare(page_t *pp)
{
uint_t cnt;
cnt = hment_mapcnt(pp);
if (vpm_enable && pp->p_vpmref) {
cnt += 1;
}
return (cnt);
}
int
hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
{
return (hat_page_getshare(pp) > sh_thresh);
}
faultcode_t
hat_softlock(
hat_t *hat,
caddr_t addr,
size_t *len,
struct page **page_array,
uint_t flags)
{
return (FC_NOSUPPORT);
}
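/*
 * Report which optional HAT features are supported on this platform.
 * HAT_SHARED_PT is really ISM.
 */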
int
hat_supported(enum hat_features feature, void *arg)
{
switch (feature) {
case HAT_SHARED_PT:
return (1);
case HAT_DYNAMIC_ISM_UNMAP:
return (0);
case HAT_VMODSORT:
return (1);
case HAT_SHARED_REGIONS:
return (0);
default:
panic("hat_supported() - unknown feature");
}
return (0);
}
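/*
 * Called when a thread is exiting and has been switched to the kernel AS.
 */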
void
hat_thread_exit(kthread_t *thd)
{
ASSERT(thd->t_procp->p_as == &kas);
XPV_DISALLOW_MIGRATE();
hat_switch(thd->t_procp->p_as->a_hat);
XPV_ALLOW_MIGRATE();
}
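/*
 * Install the given hat as the current pagetable root on this CPU.
 * The flags argument is unused on x86.
 */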
void
hat_setup(hat_t *hat, int flags)
{
XPV_DISALLOW_MIGRATE();
kpreempt_disable();
hat_switch(hat);
kpreempt_enable();
XPV_ALLOW_MIGRATE();
}
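/*
 * Prepare for a CPU private mapping of the given address.  The address may
 * only be used from a single CPU and can be remapped with
 * hat_mempte_remap().  Returns the physical address of the level 0 PTE.
 */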
hat_mempte_t
hat_mempte_setup(caddr_t addr)
{
uintptr_t va = (uintptr_t)addr;
htable_t *ht;
uint_t entry;
x86pte_t oldpte;
hat_mempte_t p;
ASSERT(IS_PAGEALIGNED(va));
ASSERT(!IN_VA_HOLE(va));
++curthread->t_hatdepth;
XPV_DISALLOW_MIGRATE();
ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
if (ht == NULL) {
ht = htable_create(kas.a_hat, va, 0, NULL);
entry = htable_va2entry(va, ht);
ASSERT(ht->ht_level == 0);
oldpte = x86pte_get(ht, entry);
}
if (PTE_ISVALID(oldpte))
panic("hat_mempte_setup(): address already mapped"
"ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
HTABLE_INC(ht->ht_valid_cnt);
htable_release(ht);
XPV_ALLOW_MIGRATE();
p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
--curthread->t_hatdepth;
return (p);
}
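/*
 * Release a CPU private PTE slot obtained from hat_mempte_setup(): the PTE
 * is zeroed, the local TLB entry is flushed, and the htable valid count
 * taken in hat_mempte_setup() is dropped.
 *
 * Typical lifetime of a private PTE (illustrative sketch only; "va" and
 * "pfn" stand in for caller-supplied values):
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(va);
 *	hat_mempte_remap(pfn, va, pte_pa, PROT_READ | PROT_WRITE, 0);
 *	...access the page through va...
 *	hat_mempte_release(va, pte_pa);
 */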
void
hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
{
htable_t *ht;
XPV_DISALLOW_MIGRATE();
#ifdef __xpv
if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
UVMF_INVLPG | UVMF_LOCAL))
panic("HYPERVISOR_update_va_mapping() failed");
#else
{
x86pte_t *pteptr;
pteptr = x86pte_mapin(mmu_btop(pte_pa),
(pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
if (mmu.pae_hat)
*pteptr = 0;
else
*(x86pte32_t *)pteptr = 0;
mmu_flush_tlb_kpage((uintptr_t)addr);
x86pte_mapout();
}
#endif
ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
if (ht == NULL)
panic("hat_mempte_release(): invalid address");
ASSERT(ht->ht_level == 0);
HTABLE_DEC(ht->ht_valid_cnt);
htable_release(ht);
XPV_ALLOW_MIGRATE();
}
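/*
 * Apply a temporary CPU private mapping to a page.  Only the local TLB is
 * flushed, so this must be called with preemption disabled.
 */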
void
hat_mempte_remap(
pfn_t pfn,
caddr_t addr,
hat_mempte_t pte_pa,
uint_t attr,
uint_t flags)
{
uintptr_t va = (uintptr_t)addr;
x86pte_t pte;
#ifdef DEBUG
htable_t *ht;
uint_t entry;
ASSERT(IS_PAGEALIGNED(va));
ASSERT(!IN_VA_HOLE(va));
ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
ASSERT(ht != NULL);
ASSERT(ht->ht_level == 0);
ASSERT(ht->ht_valid_cnt > 0);
ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
htable_release(ht);
#endif
XPV_DISALLOW_MIGRATE();
pte = hati_mkpte(pfn, attr, 0, flags);
#ifdef __xpv
if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
panic("HYPERVISOR_update_va_mapping() failed");
#else
{
x86pte_t *pteptr;
pteptr = x86pte_mapin(mmu_btop(pte_pa),
(pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
if (mmu.pae_hat)
*(x86pte_t *)pteptr = pte;
else
*(x86pte32_t *)pteptr = (x86pte32_t)pte;
mmu_flush_tlb_kpage((uintptr_t)addr);
x86pte_mapout();
}
#endif
XPV_ALLOW_MIGRATE();
}
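/*
 * Hat locking functions.
 */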
void
hat_enter(hat_t *hat)
{
mutex_enter(&hat->hat_mutex);
}
void
hat_exit(hat_t *hat)
{
mutex_exit(&hat->hat_mutex);
}
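/*
 * HAT part of cpu initialization.
 */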
void
hat_cpu_online(struct cpu *cpup)
{
if (cpup != CPU) {
x86pte_cpu_init(cpup);
hat_pcp_setup(cpup);
}
CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
}
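/*
 * HAT part of cpu deletion.  Only called after the cpu is safely
 * passivated.
 */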
void
hat_cpu_offline(struct cpu *cpup)
{
ASSERT(cpup != CPU);
CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
hat_pcp_teardown(cpup);
x86pte_cpu_fini(cpup);
}
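/*
 * Called after all CPUs are brought online; removes the low address boot
 * time mappings in the given range.
 */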
void
clear_boot_mappings(uintptr_t low, uintptr_t high)
{
uintptr_t vaddr = low;
htable_t *ht = NULL;
level_t level;
uint_t entry;
x86pte_t pte;
while (vaddr < high) {
pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
if (ht == NULL)
break;
level = ht->ht_level;
entry = htable_va2entry(vaddr, ht);
ASSERT(level <= mmu.max_page_level);
ASSERT(PTE_ISPAGE(pte, level));
(void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
ASSERT(ht->ht_valid_cnt > 0);
HTABLE_DEC(ht->ht_valid_cnt);
PGCNT_DEC(ht->ht_hat, ht->ht_level);
vaddr += LEVEL_SIZE(ht->ht_level);
}
if (ht)
htable_release(ht);
}
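/*
 * Atomically update a PTE.  If the currently installed PTE doesn't match
 * the value we expect to find, it is left alone and the PTE we found is
 * returned; on success 0 is returned.
 *
 * When deactivating write permission or activating NOSYNC on a modified or
 * referenced PTE, the ref/mod bits must first be captured into the page_t
 * so the hardware state isn't lost.
 */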
static x86pte_t
hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
{
page_t *pp;
uint_t rm = 0;
x86pte_t replaced;
if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
PTE_GET(expected, PT_MOD | PT_REF) &&
(PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
!PTE_GET(new, PT_MOD | PT_REF))) {
ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
ASSERT(pp != NULL);
if (PTE_GET(expected, PT_MOD))
rm |= P_MOD;
if (PTE_GET(expected, PT_REF))
rm |= P_REF;
PTE_CLR(new, PT_MOD | PT_REF);
}
replaced = x86pte_update(ht, entry, expected, new);
if (replaced != expected)
return (replaced);
if (rm) {
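/*
 * Sync the captured ref/mod bits to all constituent pages of a large
 * page.  hat_page_demote() can't lower p_szc below this mapping size
 * while the mapping list lock is held, hence the ASSERT.
 */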
pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
while (pgcnt-- > 0) {
ASSERT(pp->p_szc >= ht->ht_level);
hat_page_setattr(pp, rm);
++pp;
}
}
return (0);
}
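/*
 * Shared regions are not supported on x86: hat_join_srd() is a no-op and
 * the remaining entry points must never be called.
 */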
void
hat_join_srd(struct hat *hat, vnode_t *evp)
{
}
hat_region_cookie_t
hat_join_region(struct hat *hat,
caddr_t r_saddr,
size_t r_size,
void *r_obj,
u_offset_t r_objoff,
uchar_t r_perm,
uchar_t r_pgszc,
hat_rgn_cb_func_t r_cb_function,
uint_t flags)
{
panic("No shared region support on x86");
return (HAT_INVALID_REGION_COOKIE);
}
void
hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
{
panic("No shared region support on x86");
}
void
hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
{
panic("No shared region support on x86");
}
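/*
 * Kernel Physical Mapping (kpm) facility.
 *
 * Most of the routines needed to support segkpm are nearly no-ops on x86:
 * the entire segment is mapped when it is created and left mapped, so
 * there is no per-page setup or teardown to do.
 */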
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
caddr_t vaddr;
#ifdef DEBUG
if (kpm_enable == 0) {
cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
return ((caddr_t)NULL);
}
if (pp == NULL || PAGE_LOCKED(pp) == 0) {
cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
return ((caddr_t)NULL);
}
#endif
vaddr = hat_kpm_page2va(pp, 1);
return (vaddr);
}
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
#ifdef DEBUG
if (kpm_enable == 0) {
cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
return;
}
if (IS_KPM_ADDR(vaddr) == 0) {
cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
return;
}
if (pp == NULL || PAGE_LOCKED(pp) == 0) {
cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
return;
}
#endif
}
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
uintptr_t vaddr;
if (kpm_enable == 0)
return ((caddr_t)NULL);
vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
return ((caddr_t)vaddr);
}
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
}
caddr_t
hat_kpm_pfn2va(pfn_t pfn)
{
uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
ASSERT(!pfn_is_foreign(pfn));
return ((caddr_t)vaddr);
}
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
return (hat_kpm_pfn2va(pp->p_pagenum));
}
pfn_t
hat_kpm_va2pfn(caddr_t vaddr)
{
pfn_t pfn;
ASSERT(IS_KPM_ADDR(vaddr));
pfn = (pfn_t)btop(vaddr - kpm_vbase);
return (pfn);
}
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
pfn_t pfn;
ASSERT(IS_KPM_ADDR(vaddr));
pfn = hat_kpm_va2pfn(vaddr);
return (page_numtopp_nolock(pfn));
}
int
hat_kpm_fault(hat_t *hat, caddr_t vaddr)
{
panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
(void *)hat, (void *)vaddr);
return (0);
}
void
hat_kpm_mseghash_clear(int nentries)
{}
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{}
#ifndef __xpv
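/*
 * Memory DR (add/delete) support.  Added memsegs are simply loaded into
 * the kpm segment; deleting or splitting kpm-mapped memory is not
 * supported, hence the ASSERT(0) stubs below.
 */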
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
offset_t kpm_pages_off)
{
_NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
pfn_t base, end;
base = memseg_get_start(msp);
end = msp->pages_end;
hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
}
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{
_NOTE(ARGUNUSED(msp));
}
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
_NOTE(ARGUNUSED(msp));
}
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
return ((caddr_t)msp->epages);
}
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
_NOTE(ARGUNUSED(msp, mspp));
ASSERT(0);
}
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
_NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
ASSERT(0);
}
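/*
 * Walk the kpm segment, invoking func() once per memseg with the kpm
 * virtual base address and size of that memseg.
 */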
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
pfn_t pbase, pend;
void *base;
size_t size;
struct memseg *msp;
for (msp = memsegs; msp; msp = msp->next) {
pbase = msp->pages_base;
pend = msp->pages_end;
base = ptob(pbase) + kpm_vbase;
size = ptob(pend - pbase);
func(arg, base, size);
}
}
#else
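/*
 * Hypervisor-only interfaces.  hat_prepare_mapping() creates the level 0
 * pagetable for addr (optionally returning the machine address of its PTE)
 * and hat_release_mapping() drops the references taken there.
 */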
void
hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
{
maddr_t base_ma;
htable_t *ht;
uint_t entry;
ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
XPV_DISALLOW_MIGRATE();
ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
if (pte_ma != NULL) {
entry = htable_va2entry((uintptr_t)addr, ht);
base_ma = pa_to_ma(ptob(ht->ht_pfn));
*pte_ma = base_ma + (entry << mmu.pte_size_shift);
}
XPV_ALLOW_MIGRATE();
}
void
hat_release_mapping(hat_t *hat, caddr_t addr)
{
htable_t *ht;
ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
XPV_DISALLOW_MIGRATE();
ht = htable_lookup(hat, (uintptr_t)addr, 0);
ASSERT(ht != NULL);
ASSERT(ht->ht_busy >= 2);
htable_release(ht);
htable_release(ht);
XPV_ALLOW_MIGRATE();
}
#endif
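/*
 * Helper function to punch a mapping into the per-CPU KPTI user hat with
 * the specified attributes.
 */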
void
hati_cpu_punchin(cpu_t *cpu, uintptr_t va, uint_t attrs)
{
int ret;
pfn_t pfn;
hat_t *cpu_hat = cpu->cpu_hat_info->hci_user_hat;
ASSERT3S(kpti_enable, ==, 1);
ASSERT3P(cpu_hat, !=, NULL);
ASSERT3U(cpu_hat->hat_flags & HAT_PCP, ==, HAT_PCP);
ASSERT3U(va & MMU_PAGEOFFSET, ==, 0);
pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
VERIFY3U(pfn, !=, PFN_INVALID);
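/*
 * We purposefully don't try to find the page_t, so this mapping will be
 * marked PT_NOCONSIST; that is fine since this is pure text, not data.
 */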
attrs |= HAT_STORECACHING_OK;
ret = hati_load_common(cpu_hat, va, NULL, attrs, 0, 0, pfn);
VERIFY3S(ret, ==, 0);
}