#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/machsystm.h>	/* hot_patch_kernel_text() */
#include <sys/modctl.h>		/* modgetsymname() */
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/panic.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/hat_i86.h>
#include <sys/vmm_gpt.h>
#include <sys/vmm_gpt_impl.h>
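
/*
 * The guest page tables (GPT) map guest-physical addresses with a four-level
 * radix tree of hardware-format tables.  A parallel tree of vmm_gpt_node_t
 * structures shadows those tables, tracking each table page's host PFN, its
 * position in the tree, and a count of in-use entries so that empty tables
 * can be pruned.  Because the PTE bit format differs between implementations
 * (installed at vmm_gpt_init() time), the format-specific operations on the
 * hot paths are dispatched through hot-patched near-JMP stubs rather than
 * indirect calls.
 */

/*
 * A node in the shadow tree: vgn_host_pfn is the host PFN of the backing
 * table page (vgn_entries is its kernel-mapped address), vgn_level and
 * vgn_index locate it within the tree and within its parent's table,
 * vgn_ref_cnt counts in-use entries and children, and vgn_gpa is the base of
 * the guest-physical span the node covers.
 */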
typedef struct vmm_gpt_node vmm_gpt_node_t;
struct vmm_gpt_node {
	uint64_t	vgn_host_pfn;
	uint16_t	vgn_level;
	uint16_t	vgn_index;
	uint32_t	vgn_ref_cnt;
	vmm_gpt_node_t	*vgn_parent;
	vmm_gpt_node_t	*vgn_children;
	vmm_gpt_node_t	*vgn_sib_next;
	vmm_gpt_node_t	*vgn_sib_prev;
	vmm_gpt_entry_t	*vgn_entries;
	uint64_t	vgn_gpa;
};

#define PTE_PER_TABLE 512
#define MAX_NODE_IDX (PTE_PER_TABLE - 1)
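
/*
 * A guest page table, rooted at its LEVEL4 node.
 */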
struct vmm_gpt {
	vmm_gpt_node_t	*vgpt_root;
};
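
/*
 * Default target for the PTE-implementation stubs: anything which calls
 * through a stub before vmm_gpt_init() has hot-patched it will panic here.
 */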
void
vmm_gpt_impl_panic(void)
{
	panic("Indirect function not hot-patched");
}
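
/*
 * The hot-patchable PTE-format entry points.  Each is expected to begin as a
 * 5-byte near-JMP trampoline (defined outside this file) whose target is
 * validated and rewritten by vmm_gpt_patch_indirection().
 */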
uint64_t vmm_gpti_map_table(uint64_t);
uint64_t vmm_gpti_map_page(uint64_t, uint_t, uint8_t);
bool vmm_gpti_parse(uint64_t, pfn_t *, uint_t *);
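
/*
 * Placeholder implementation which panics on any use, active until
 * vmm_gpt_init() installs a real one.
 */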
const struct vmm_pte_impl vmm_pte_uninit_impl = {
	.vpi_map_table = (void *)vmm_gpt_impl_panic,
	.vpi_map_page = (void *)vmm_gpt_impl_panic,
	.vpi_pte_parse = (void *)vmm_gpt_impl_panic,
	.vpi_bit_accessed = 0,
	.vpi_bit_dirty = 0,
	.vpi_get_pmtp = (void *)vmm_gpt_impl_panic,
	.vpi_hw_ad_supported = (void *)vmm_gpt_impl_panic,
};
static const struct vmm_pte_impl *vmm_pte_impl = &vmm_pte_uninit_impl;
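
/*
 * An x86 near-JMP: opcode 0xe9 followed by a signed 32-bit displacement
 * relative to the address of the following instruction.
 */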
#define	JMP_NEAR_OPCODE	0xe9
struct jmp_instr {
	uint8_t opcode;
	int32_t off;
} __packed;
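
/*
 * Decode the absolute target of a near-JMP: the displacement is relative to
 * the address of the instruction which follows it.
 */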
static uintptr_t
jmp_off_to_addr(const struct jmp_instr *instp)
{
	const uintptr_t next_rip = (uintptr_t)&instp[1];
	const uintptr_t off = (uintptr_t)(intptr_t)instp->off;

	return (next_rip + off);
}
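
/*
 * Calculate the 32-bit displacement required for a near-JMP at instp to reach
 * target, storing it in resp (if non-NULL).  Returns false if the target is
 * beyond the +/- 2GiB reach of a near-JMP.
 */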
static bool
jmp_addr_to_off(const struct jmp_instr *instp, void *target, int32_t *resp)
{
	const uintptr_t next_rip = (uintptr_t)&instp[1];
	const intptr_t off = (uintptr_t)target - next_rip;

	if (off < INT32_MIN || off > INT32_MAX) {
		return (false);
	}
	if (resp != NULL) {
		*resp = (int32_t)off;
	}
	return (true);
}
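
/*
 * Best-effort translation of an address to a symbol name for warnings.
 */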
static inline const char *
addr_to_sym(void *addr)
{
	ulong_t ignored_offset;
	const char *name = modgetsymname((uintptr_t)addr, &ignored_offset);

	return (name != NULL ? name : "<null>");
}
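
/*
 * Re-point the indirection stubs from old_impl to new_impl.  This is done in
 * two passes: first every site is validated (the stub must begin with a
 * near-JMP, its current target must match old_impl, and the new target must
 * be reachable via a 32-bit displacement), and only then are the
 * displacements hot-patched.  A failure in the first pass thus leaves every
 * stub untouched.
 */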
static bool
vmm_gpt_patch_indirection(const struct vmm_pte_impl *old_impl,
    const struct vmm_pte_impl *new_impl)
{
	struct indirection_patch {
		void (*patch_site)();
		void (*old_implf)();
		void (*new_implf)();
	};
	const struct indirection_patch patches[] = {
		{
			.patch_site = (void *)vmm_gpti_map_table,
			.old_implf = (void *)old_impl->vpi_map_table,
			.new_implf = (void *)new_impl->vpi_map_table,
		},
		{
			.patch_site = (void *)vmm_gpti_map_page,
			.old_implf = (void *)old_impl->vpi_map_page,
			.new_implf = (void *)new_impl->vpi_map_page,
		},
		{
			.patch_site = (void *)vmm_gpti_parse,
			.old_implf = (void *)old_impl->vpi_pte_parse,
			.new_implf = (void *)new_impl->vpi_pte_parse,
		},
	};

	for (uint_t i = 0; i < ARRAY_SIZE(patches); i++) {
		const struct indirection_patch *patch = &patches[i];
		const struct jmp_instr *instp = (void *)patch->patch_site;

		if (instp->opcode != JMP_NEAR_OPCODE) {
			cmn_err(CE_WARN, "vmm: non-JMP instruction found when "
			    "attempting to hotpatch %s",
			    addr_to_sym(patch->patch_site));
			return (false);
		}

		const uintptr_t old_target = jmp_off_to_addr(instp);
		if (old_target != (uintptr_t)patch->old_implf) {
			cmn_err(CE_WARN, "vmm: JMP instr @ %s has unexpected "
			    "target %s != %s",
			    addr_to_sym(patch->patch_site),
			    addr_to_sym((void *)old_target),
			    addr_to_sym(patch->old_implf));
			return (false);
		}

		if (!jmp_addr_to_off(instp, patch->new_implf, NULL)) {
			cmn_err(CE_WARN, "vmm: near-JMP to new target %s is "
			    "too far for site %s",
			    addr_to_sym(patch->new_implf),
			    addr_to_sym(patch->patch_site));
			return (false);
		}
	}

	for (uint_t i = 0; i < ARRAY_SIZE(patches); i++) {
		const struct indirection_patch *patch = &patches[i];
		struct jmp_instr *instp = (void *)patch->patch_site;
		int32_t new_off;

		VERIFY(jmp_addr_to_off(instp, patch->new_implf, &new_off));
		hot_patch_kernel_text((caddr_t)&instp->off, new_off, 4);
	}

	return (true);
}
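
/*
 * Install target_impl as the active PTE implementation by patching the
 * indirection stubs.  Must be called while the placeholder implementation is
 * still in place.  Returns false if patching failed.
 */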
bool
vmm_gpt_init(const struct vmm_pte_impl *target_impl)
{
	VERIFY3P(vmm_pte_impl, ==, &vmm_pte_uninit_impl);

	if (vmm_gpt_patch_indirection(vmm_pte_impl, target_impl)) {
		vmm_pte_impl = target_impl;
		return (true);
	}
	return (false);
}
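
/*
 * Restore the panicking placeholder implementation, undoing vmm_gpt_init().
 */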
void
vmm_gpt_fini(void)
{
	VERIFY(vmm_gpt_patch_indirection(vmm_pte_impl, &vmm_pte_uninit_impl));
	vmm_pte_impl = &vmm_pte_uninit_impl;
}
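
/*
 * Allocates a tree node along with its zeroed page-table page, recording the
 * host PFN which backs it.
 */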
static vmm_gpt_node_t *
vmm_gpt_node_alloc(void)
{
	vmm_gpt_node_t *node;
	caddr_t page;

	node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	page = kmem_zalloc(PAGESIZE, KM_SLEEP);
	node->vgn_entries = (vmm_gpt_entry_t *)page;
	node->vgn_host_pfn = hat_getpfnum(kas.a_hat, page);

	return (node);
}
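
/*
 * Allocates an empty guest page table with only its root table present.
 */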
vmm_gpt_t *
vmm_gpt_alloc(void)
{
	vmm_gpt_t *gpt = kmem_zalloc(sizeof (vmm_gpt_t), KM_SLEEP);

	gpt->vgpt_root = vmm_gpt_node_alloc();
	return (gpt);
}
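
/*
 * Frees a tree node and its backing table page.  The node must already be
 * unlinked from the tree and hold no references.
 */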
static void
vmm_gpt_node_free(vmm_gpt_node_t *node)
{
	ASSERT(node != NULL);
	ASSERT3U(node->vgn_ref_cnt, ==, 0);
	ASSERT(node->vgn_host_pfn != PFN_INVALID);
	ASSERT(node->vgn_entries != NULL);
	ASSERT(node->vgn_parent == NULL);

	kmem_free(node->vgn_entries, PAGESIZE);
	kmem_free(node, sizeof (*node));
}
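
/*
 * Frees a guest page table, first vacating the entire address space so that
 * all intermediate tables are released.
 */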
void
vmm_gpt_free(vmm_gpt_t *gpt)
{
	vmm_gpt_vacate_region(gpt, 0, UINT64_MAX & PAGEMASK);

	VERIFY(gpt->vgpt_root != NULL);
	VERIFY3U(gpt->vgpt_root->vgn_ref_cnt, ==, 0);

	vmm_gpt_node_free(gpt->vgpt_root);
	kmem_free(gpt, sizeof (*gpt));
}
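
/*
 * Extracts the 9-bit index into the table at `level` for the given GPA.
 */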
static inline uint16_t
vmm_gpt_lvl_index(vmm_gpt_node_level_t level, uint64_t gpa)
{
	ASSERT(level < MAX_GPT_LEVEL);

	const uint16_t mask = MAX_NODE_IDX;
	switch (level) {
	case LEVEL4:
		return ((gpa >> 39) & mask);
	case LEVEL3:
		return ((gpa >> 30) & mask);
	case LEVEL2:
		return ((gpa >> 21) & mask);
	case LEVEL1:
		return ((gpa >> 12) & mask);
	default:
		panic("impossible level value");
	}
}
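
/*
 * Mask which aligns a GPA to the span covered by one entry at `level`
 * (512GiB, 1GiB, 2MiB, and 4KiB for LEVEL4 through LEVEL1, respectively).
 */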
static inline uint64_t
vmm_gpt_lvl_mask(vmm_gpt_node_level_t level)
{
	ASSERT(level < MAX_GPT_LEVEL);

	switch (level) {
	case LEVEL4:
		return (0xffffff8000000000ul);
	case LEVEL3:
		return (0xffffffffc0000000ul);
	case LEVEL2:
		return (0xffffffffffe00000ul);
	case LEVEL1:
		return (0xfffffffffffff000ul);
	default:
		panic("impossible level value");
	}
}
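
/*
 * Bytes of guest-physical address space covered by one entry at `level`.
 */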
static inline uint64_t
vmm_gpt_lvl_len(vmm_gpt_node_level_t level)
{
	ASSERT(level < MAX_GPT_LEVEL);

	switch (level) {
	case LEVEL4:
		return (0x8000000000ul);
	case LEVEL3:
		return (0x40000000ul);
	case LEVEL2:
		return (0x200000ul);
	case LEVEL1:
		return (0x1000ul);
	default:
		panic("impossible level value");
	}
}
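
/*
 * Recovers an entry's index within its table from the pointer itself, via its
 * offset into the page-aligned, page-sized table.
 */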
static inline uint16_t
vmm_gpt_ptep_index(const vmm_gpt_entry_t *ptep)
{
	const uintptr_t offset = (uintptr_t)ptep & 0xffful;
	return (offset / sizeof (uint64_t));
}
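
/*
 * First GPA beyond the span covered by this node: a node at `level` occupies
 * one entry of its parent, so it spans one entry's worth of the level above.
 */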
static inline uint64_t
vmm_gpt_node_end(vmm_gpt_node_t *node)
{
	ASSERT(node->vgn_level > LEVEL4);
	return (node->vgn_gpa + vmm_gpt_lvl_len(node->vgn_level - 1));
}
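
/*
 * Is this node the last child (index 511) within its parent's table?
 */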
static inline bool
vmm_gpt_node_is_last(vmm_gpt_node_t *node)
{
	return (node->vgn_index == MAX_NODE_IDX);
}
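
/*
 * How many table entries of `node` fall within the span [start, end)?
 */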
static uint16_t
vmm_gpt_node_entries_covered(vmm_gpt_node_t *node, uint64_t start, uint64_t end)
{
	const uint64_t node_end = vmm_gpt_node_end(node);

	if (start >= node_end || end <= node->vgn_gpa) {
		return (0);
	}

	const uint64_t mask = vmm_gpt_lvl_mask(node->vgn_level);
	const uint64_t covered_start = MAX(node->vgn_gpa, start & mask);
	const uint64_t covered_end = MIN(node_end, end & mask);
	const uint64_t per_entry = vmm_gpt_lvl_len(node->vgn_level);

	return ((covered_end - covered_start) / per_entry);
}
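
/*
 * Finds the next node in GPA order at the same level, following the sibling
 * list or, when it is exhausted, hopping to the first child of the parent's
 * own (strictly sequential) successor.  If only_seq is set, a node qualifies
 * only when it begins exactly where this one ends, i.e. with no intervening
 * hole in the address space.
 */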
static vmm_gpt_node_t *
vmm_gpt_node_next(vmm_gpt_node_t *node, bool only_seq)
{
	ASSERT3P(node->vgn_parent, !=, NULL);
	ASSERT3U(node->vgn_level, >, LEVEL4);

	const uint64_t gpa_match = vmm_gpt_node_end(node);
	vmm_gpt_node_t *next = node->vgn_sib_next;
	if (next == NULL) {
		if (node->vgn_parent != NULL && node->vgn_level > LEVEL3) {
			vmm_gpt_node_t *psibling =
			    vmm_gpt_node_next(node->vgn_parent, true);
			if (psibling != NULL) {
				next = psibling->vgn_children;
			}
		}
	}

	if (next != NULL &&
	    (next->vgn_gpa == gpa_match || !only_seq)) {
		return (next);
	}
	return (NULL);
}
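
/*
 * Looks up the child of `parent` which covers `gpa`, if one is present; the
 * child list is kept sorted by index.
 */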
static vmm_gpt_node_t *
vmm_gpt_node_find_child(vmm_gpt_node_t *parent, uint64_t gpa)
{
	const uint16_t index = vmm_gpt_lvl_index(parent->vgn_level, gpa);

	for (vmm_gpt_node_t *child = parent->vgn_children;
	    child != NULL && child->vgn_index <= index;
	    child = child->vgn_sib_next) {
		if (child->vgn_index == index) {
			return (child);
		}
	}
	return (NULL);
}
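
/*
 * Links `child` into the tree under `parent` at the slot for `gpa`, keeping
 * the sibling list sorted, and installs the child's table page into the
 * parent's entries.  prev_sibling, if non-NULL, is the sibling which will
 * immediately precede the child.
 */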
static void
vmm_gpt_node_add(vmm_gpt_t *gpt, vmm_gpt_node_t *parent,
    vmm_gpt_node_t *child, uint64_t gpa, vmm_gpt_node_t *prev_sibling)
{
	ASSERT3U(parent->vgn_level, <, LEVEL1);
	ASSERT3P(child->vgn_parent, ==, NULL);

	const uint16_t idx = vmm_gpt_lvl_index(parent->vgn_level, gpa);
	child->vgn_index = idx;
	child->vgn_level = parent->vgn_level + 1;
	child->vgn_gpa = gpa & vmm_gpt_lvl_mask(parent->vgn_level);
	child->vgn_parent = parent;

	if (prev_sibling != NULL) {
		ASSERT3U(prev_sibling->vgn_gpa, <, child->vgn_gpa);

		child->vgn_sib_next = prev_sibling->vgn_sib_next;
		if (child->vgn_sib_next != NULL) {
			child->vgn_sib_next->vgn_sib_prev = child;
		}
		child->vgn_sib_prev = prev_sibling;
		prev_sibling->vgn_sib_next = child;
	} else if (parent->vgn_children != NULL) {
		vmm_gpt_node_t *next_sibling = parent->vgn_children;

		ASSERT3U(next_sibling->vgn_gpa, >, child->vgn_gpa);
		ASSERT3P(next_sibling->vgn_sib_prev, ==, NULL);

		child->vgn_sib_next = next_sibling;
		child->vgn_sib_prev = NULL;
		next_sibling->vgn_sib_prev = child;
		parent->vgn_children = child;
	} else {
		parent->vgn_children = child;
		child->vgn_sib_next = NULL;
		child->vgn_sib_prev = NULL;
	}

	parent->vgn_entries[idx] = vmm_gpti_map_table(child->vgn_host_pfn);
	parent->vgn_ref_cnt++;
}
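
/*
 * Unlinks an empty `child` from the tree, clears its entry in the parent's
 * table, drops the parent's reference, and frees the node.
 */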
static void
vmm_gpt_node_remove(vmm_gpt_node_t *child)
{
	ASSERT3P(child->vgn_children, ==, NULL);
	ASSERT3U(child->vgn_ref_cnt, ==, 0);
	ASSERT3P(child->vgn_parent, !=, NULL);

	vmm_gpt_node_t *parent = child->vgn_parent;
	vmm_gpt_node_t *prev = child->vgn_sib_prev;
	vmm_gpt_node_t *next = child->vgn_sib_next;

	if (prev != NULL) {
		ASSERT3P(prev->vgn_sib_next, ==, child);
		prev->vgn_sib_next = next;
	}
	if (next != NULL) {
		ASSERT3P(next->vgn_sib_prev, ==, child);
		next->vgn_sib_prev = prev;
	}
	if (prev == NULL) {
		ASSERT3P(parent->vgn_children, ==, child);
		parent->vgn_children = next;
	}

	child->vgn_parent = NULL;
	child->vgn_sib_next = NULL;
	child->vgn_sib_prev = NULL;
	parent->vgn_entries[child->vgn_index] = 0;
	parent->vgn_ref_cnt--;

	vmm_gpt_node_free(child);
}
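
/*
 * Walks the page tables for the given GPA, filling `entries` with pointers to
 * the PTE at each level down to `depth`.  Levels below a non-present entry
 * are filled with NULL.  Returns `gpa` truncated to the base of the span
 * covered by the deepest entry reached.
 */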
uint64_t
vmm_gpt_walk(vmm_gpt_t *gpt, uint64_t gpa, vmm_gpt_entry_t **entries,
    vmm_gpt_node_level_t depth)
{
	ASSERT(gpt != NULL);
	ASSERT3U(depth, <, MAX_GPT_LEVEL);

	vmm_gpt_entry_t *current_entries = gpt->vgpt_root->vgn_entries;
	uint64_t mask = 0;
	for (uint_t lvl = LEVEL4; lvl <= depth; lvl++) {
		if (current_entries == NULL) {
			entries[lvl] = NULL;
			continue;
		}
		entries[lvl] = &current_entries[vmm_gpt_lvl_index(lvl, gpa)];
		mask = vmm_gpt_lvl_mask(lvl);

		const vmm_gpt_entry_t pte = *entries[lvl];
		pfn_t pfn;
		if (!vmm_gpti_parse(pte, &pfn, NULL)) {
			current_entries = NULL;
			continue;
		}
		current_entries = (vmm_gpt_entry_t *)hat_kpm_pfn2va(pfn);
	}
	return (gpa & mask);
}
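
/*
 * Given a populated `entries` array from a walk to `depth`, advance to the
 * PTEs for the next GPA at that depth.  The walk climbs toward the root when
 * a table is exhausted, then descends back into the tables covering the new
 * GPA.  Returns the new GPA, or UINT64_MAX if the end of the address space
 * was reached.
 */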
static uint64_t
vmm_gpt_walk_advance(vmm_gpt_t *gpt, uint64_t gpa, vmm_gpt_entry_t **entries,
    vmm_gpt_node_level_t depth)
{
	ASSERT(gpt != NULL);
	ASSERT3U(depth, <, MAX_GPT_LEVEL);
	ASSERT0(gpa & ~vmm_gpt_lvl_mask(depth));

	int lvl;
	for (lvl = depth; lvl >= LEVEL4; lvl--) {
		vmm_gpt_entry_t *ptep = entries[lvl];

		if (ptep == NULL) {
			continue;
		}

		uint16_t index = vmm_gpt_ptep_index(ptep);
		ASSERT3U(vmm_gpt_lvl_index(lvl, gpa), ==, index);
		if (index == MAX_NODE_IDX) {
			continue;
		}

		gpa = (gpa & vmm_gpt_lvl_mask(lvl)) + vmm_gpt_lvl_len(lvl);
		entries[lvl] = ptep + 1;
		break;
	}

	if (lvl < LEVEL4) {
		return (UINT64_MAX);
	}

	vmm_gpt_entry_t pte = *entries[lvl];
	lvl++;
	for (; lvl < MAX_GPT_LEVEL; lvl++) {
		pfn_t pfn;

		if (lvl > depth || !vmm_gpti_parse(pte, &pfn, NULL)) {
			entries[lvl] = NULL;
			continue;
		}

		vmm_gpt_entry_t *next_table =
		    (vmm_gpt_entry_t *)hat_kpm_pfn2va(pfn);
		const uint16_t index = vmm_gpt_lvl_index(lvl, gpa);
		pte = next_table[index];
		entries[lvl] = &next_table[index];
	}

	return (gpa);
}
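
/*
 * Initializes an iterator over the PTEs covering [addr, addr + len), starting
 * with a full walk to LEVEL1 at the first page.
 */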
void
vmm_gpt_iter_init(vmm_gpt_iter_t *iter, vmm_gpt_t *gpt, uint64_t addr,
    uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);
	ASSERT3U((addr + len), >=, addr);

	iter->vgi_gpt = gpt;
	iter->vgi_addr = addr;
	iter->vgi_end = addr + len;
	iter->vgi_current = vmm_gpt_walk(gpt, addr, iter->vgi_entries, LEVEL1);
}
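
/*
 * Fetches the next LEVEL1 entry in the iterated range, skipping any pages for
 * which no page table is present.  Returns false once the range is exhausted.
 */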
bool
vmm_gpt_iter_next(vmm_gpt_iter_t *iter, vmm_gpt_iter_entry_t *entry)
{
	if (iter->vgi_current >= iter->vgi_end) {
		return (false);
	}

	while (iter->vgi_current < iter->vgi_end) {
		bool found = false;

		if (iter->vgi_entries[LEVEL1] != NULL) {
			entry->vgie_gpa = iter->vgi_current;
			entry->vgie_ptep = iter->vgi_entries[LEVEL1];
			found = true;
		}

		iter->vgi_current = vmm_gpt_walk_advance(iter->vgi_gpt,
		    iter->vgi_current, iter->vgi_entries, LEVEL1);

		if (found) {
			return (true);
		}
	}
	return (false);
}
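
/*
 * Ensures that the children of node_start needed to cover [addr, addr + len)
 * all exist, allocating and linking any which are missing, and returns the
 * child covering the start of the region.
 */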
static vmm_gpt_node_t *
vmm_gpt_populate_region_lvl(vmm_gpt_t *gpt, uint64_t addr, uint64_t len,
    vmm_gpt_node_t *node_start)
{
	const vmm_gpt_node_level_t lvl = node_start->vgn_level;
	const uint64_t end = addr + len;
	const uint64_t incr = vmm_gpt_lvl_len(lvl);
	uint64_t gpa = addr & vmm_gpt_lvl_mask(lvl);
	vmm_gpt_node_t *parent = node_start;

	vmm_gpt_node_t *prev = NULL, *node = parent->vgn_children;
	while (node != NULL && node->vgn_gpa < gpa) {
		prev = node;
		node = node->vgn_sib_next;
	}
	if (node == NULL || node->vgn_gpa > gpa) {
		node = vmm_gpt_node_alloc();
		vmm_gpt_node_add(gpt, parent, node, gpa, prev);
	}

	vmm_gpt_node_t *front_node = node;
	prev = node;
	gpa += incr;

	for (; gpa < end; gpa += incr, prev = node) {
		node = vmm_gpt_node_next(prev, true);
		if (node != NULL) {
			ASSERT3U(node->vgn_gpa, ==, gpa);
			parent = node->vgn_parent;
			continue;
		}

		if (vmm_gpt_node_is_last(prev)) {
			parent = vmm_gpt_node_next(parent, true);
			ASSERT(parent != NULL);
			prev = NULL;
		}
		node = vmm_gpt_node_alloc();
		vmm_gpt_node_add(gpt, parent, node, gpa, prev);
	}

	return (front_node);
}
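
/*
 * Ensures that page tables exist down to LEVEL1 for all of [addr, addr +
 * len), taking a reference on each covered LEVEL1 entry so the tables persist
 * until vmm_gpt_vacate_region() releases them.
 */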
void
vmm_gpt_populate_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	vmm_gpt_node_t *node = gpt->vgpt_root;
	for (uint_t lvl = LEVEL4; lvl < LEVEL1; lvl++) {
		ASSERT3U(node->vgn_level, ==, lvl);
		node = vmm_gpt_populate_region_lvl(gpt, addr, len, node);
	}

	uint64_t gpa = addr;
	const uint64_t end = addr + len;
	while (gpa < end) {
		ASSERT(node != NULL);
		ASSERT3U(node->vgn_level, ==, LEVEL1);

		const uint16_t covered =
		    vmm_gpt_node_entries_covered(node, addr, end);

		ASSERT(covered != 0);
		ASSERT3U(node->vgn_ref_cnt, <, PTE_PER_TABLE);
		ASSERT3U(node->vgn_ref_cnt + covered, <=, PTE_PER_TABLE);

		node->vgn_ref_cnt += covered;

		vmm_gpt_node_t *next = vmm_gpt_node_next(node, true);
		if (next != NULL) {
			gpa = next->vgn_gpa;
			node = next;
		} else {
			VERIFY3U(end, <=, vmm_gpt_node_end(node));
			break;
		}
	}
}
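
/*
 * Installs a mapping of `pfn` (with protections `prot` and attributes `attr`)
 * at `ptep` via compare-and-swap against an empty entry.  Returns false if
 * the entry was already populated; a racing mapping is expected to refer to
 * the same PFN.
 */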
bool
vmm_gpt_map_at(vmm_gpt_t *gpt, vmm_gpt_entry_t *ptep, pfn_t pfn, uint_t prot,
    uint8_t attr)
{
	const vmm_gpt_entry_t pte = vmm_gpti_map_page(pfn, prot, attr);
	const vmm_gpt_entry_t old_pte = atomic_cas_64(ptep, 0, pte);
	if (old_pte != 0) {
#ifdef DEBUG
		pfn_t new_pfn, old_pfn;

		ASSERT(vmm_gpti_parse(pte, &new_pfn, NULL));
		ASSERT(vmm_gpti_parse(old_pte, &old_pfn, NULL));
		ASSERT3U(old_pfn, ==, new_pfn);
#endif
		return (false);
	}
	return (true);
}
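
/*
 * Releases the references on [addr, addr + len) taken by
 * vmm_gpt_populate_region(), pruning (from LEVEL1 upward) any page tables
 * whose reference counts drop to zero.
 */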
void
vmm_gpt_vacate_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	const uint64_t end = addr + len;
	vmm_gpt_node_t *node, *starts[MAX_GPT_LEVEL] = {
		[LEVEL4] = gpt->vgpt_root,
	};

	for (vmm_gpt_node_level_t lvl = LEVEL4; lvl < LEVEL1; lvl++) {
		node = vmm_gpt_node_find_child(starts[lvl], addr);
		if (node == NULL) {
			break;
		}
		starts[lvl + 1] = node;
	}

	uint64_t gpa = addr;
	node = starts[LEVEL1];
	while (gpa < end && node != NULL) {
		const uint16_t covered =
		    vmm_gpt_node_entries_covered(node, addr, end);

		ASSERT3U(node->vgn_ref_cnt, >=, covered);
		node->vgn_ref_cnt -= covered;

		node = vmm_gpt_node_next(node, false);
		if (node != NULL) {
			gpa = node->vgn_gpa;
		}
	}

	for (vmm_gpt_node_level_t lvl = LEVEL1; lvl > LEVEL4; lvl--) {
		gpa = addr;
		node = starts[lvl];
		while (gpa < end && node != NULL) {
			vmm_gpt_node_t *next = vmm_gpt_node_next(node, false);

			if (node->vgn_ref_cnt == 0) {
				vmm_gpt_node_remove(node);
			}
			if (next != NULL) {
				gpa = next->vgn_gpa;
			}
			node = next;
		}
	}
}
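
/*
 * Clears the PTE mapping `gpa`, returning true if a valid mapping was present
 * there.
 */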
bool
vmm_gpt_unmap(vmm_gpt_t *gpt, uint64_t gpa)
{
	vmm_gpt_entry_t *entries[MAX_GPT_LEVEL], pte;

	ASSERT(gpt != NULL);
	(void) vmm_gpt_walk(gpt, gpa, entries, LEVEL1);
	if (entries[LEVEL1] == NULL) {
		return (false);
	}

	pte = *entries[LEVEL1];
	*entries[LEVEL1] = 0;
	return (vmm_gpti_parse(pte, NULL, NULL));
}
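
/*
 * Clears all PTEs in [addr, addr + len), returning the number of valid
 * mappings which were removed.
 */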
size_t
vmm_gpt_unmap_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	vmm_gpt_iter_t state;
	vmm_gpt_iter_entry_t entry;
	size_t num_unmapped = 0;

	vmm_gpt_iter_init(&state, gpt, addr, len);
	while (vmm_gpt_iter_next(&state, &entry)) {
		if (entry.vgie_ptep == NULL) {
			continue;
		}

		const vmm_gpt_entry_t pte = *entry.vgie_ptep;
		*entry.vgie_ptep = 0;
		if (vmm_gpti_parse(pte, NULL, NULL)) {
			num_unmapped++;
		}
	}

	return (num_unmapped);
}
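
/*
 * Is a valid mapping present at `ptep`?  If so, optionally emit its PFN and
 * protections.
 */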
bool
vmm_gpte_is_mapped(const vmm_gpt_entry_t *ptep, pfn_t *pfnp, uint_t *protp)
{
	ASSERT(ptep != NULL);

	return (vmm_gpti_parse(*ptep, pfnp, protp));
}
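
/*
 * Atomically replaces the bits selected by `mask` in a PTE with `bits`,
 * returning the prior value of those bits.
 */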
static uint64_t
vmm_gpt_reset_bits(volatile uint64_t *ptep, uint64_t mask, uint64_t bits)
{
	uint64_t pte, newpte, oldpte = 0;

	VERIFY3P(ptep, !=, NULL);

	oldpte = *ptep;
	do {
		pte = oldpte;
		newpte = (pte & ~mask) | bits;
		oldpte = atomic_cas_64(ptep, pte, newpte);
	} while (oldpte != pte);

	return (oldpte & mask);
}
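
/*
 * Sets or clears the accessed bit in a PTE, clearing the dirty bit in the
 * same atomic operation.  Returns whether the accessed (or dirty) bit was
 * previously set.
 */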
bool
vmm_gpte_reset_accessed(vmm_gpt_entry_t *ptep, bool on)
{
	ASSERT(ptep != NULL);

	const uint64_t accessed_bit = vmm_pte_impl->vpi_bit_accessed;
	const uint64_t dirty_bit = vmm_pte_impl->vpi_bit_dirty;
	const uint64_t old_state = vmm_gpt_reset_bits(ptep,
	    accessed_bit | dirty_bit, on ? accessed_bit : 0);

	return (old_state != 0);
}
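
/*
 * Sets or clears the dirty bit in a PTE, returning whether it was previously
 * set.
 */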
bool
vmm_gpte_reset_dirty(vmm_gpt_entry_t *ptep, bool on)
{
	ASSERT(ptep != NULL);

	const uint64_t dirty_bit = vmm_pte_impl->vpi_bit_dirty;
	const uint64_t old_state =
	    vmm_gpt_reset_bits(ptep, dirty_bit, on ? dirty_bit : 0);

	return (old_state != 0);
}
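
/*
 * Non-destructive queries of the accessed and dirty bits in a PTE.
 */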
bool
vmm_gpte_query_accessed(const vmm_gpt_entry_t *ptep)
{
	ASSERT(ptep != NULL);

	return ((*ptep & vmm_pte_impl->vpi_bit_accessed) != 0);
}

bool
vmm_gpte_query_dirty(const vmm_gpt_entry_t *ptep)
{
	ASSERT(ptep != NULL);

	return ((*ptep & vmm_pte_impl->vpi_bit_dirty) != 0);
}
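
/*
 * Renders the implementation-defined page-map table pointer value for this
 * GPT's root table, suitable for handing to the virtualization hardware.
 */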
uint64_t
vmm_gpt_get_pmtp(vmm_gpt_t *gpt, bool track_dirty)
{
	const pfn_t root_pfn = gpt->vgpt_root->vgn_host_pfn;

	return (vmm_pte_impl->vpi_get_pmtp(root_pfn, track_dirty));
}
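
/*
 * Does the active PTE implementation support hardware accessed/dirty
 * tracking?
 */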
bool
vmm_gpt_can_track_dirty(vmm_gpt_t *gpt)
{
	return (vmm_pte_impl->vpi_hw_ad_supported());
}