#include <vm/VMTranslationMap.h>
#include <slab/Slab.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>
/*!	Initializes an empty translation map: no pages are mapped yet and the
	map is protected by a freshly initialized recursive lock.
*/
VMTranslationMap::VMTranslationMap()
{
	fMapCount = 0;
	recursive_lock_init(&fLock, "translation map");
}
/*!	Destroys the lock protecting the map. Subclasses are expected to have
	released their own resources before this runs.
*/
VMTranslationMap::~VMTranslationMap()
{
	recursive_lock_destroy(&fLock);
}
/*!	Unmaps a range of pages of the given area.

	The default implementation just iterates over the range page-wise and
	calls UnmapPage() for each page. Subclasses may override this with a
	more efficient batched implementation.

	\param area The area whose pages shall be unmapped.
	\param base The first virtual address of the range (must be page aligned).
	\param size The size of the range in bytes (must be a multiple of the
		page size).
	\param updatePageQueue Forwarded to UnmapPage().
	\param deletingAddressSpace Forwarded to UnmapPage().
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue, bool deletingAddressSpace)
{
	ASSERT(base % B_PAGE_SIZE == 0);
	ASSERT(size % B_PAGE_SIZE == 0);

	addr_t address = base;
	addr_t end = address + size;
#if DEBUG_PAGE_ACCESS
	// With page access debugging enabled, look up each mapped page first so
	// the unmap can be bracketed with access start/end markers.
	for (; address != end; address += B_PAGE_SIZE) {
		phys_addr_t physicalAddress;
		uint32 flags;
		if (Query(address, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL) {
				DEBUG_PAGE_ACCESS_START(page);
				UnmapPage(area, address, updatePageQueue, deletingAddressSpace);
				DEBUG_PAGE_ACCESS_END(page);
			} else
				UnmapPage(area, address, updatePageQueue, deletingAddressSpace);
		}
	}
#else
	for (; address != end; address += B_PAGE_SIZE)
		UnmapPage(area, address, updatePageQueue, deletingAddressSpace);
#endif
}
/*!	Unmaps the entire given \a area and frees its page mapping objects.

	Device areas and wired (non-B_NO_LOCK) areas keep no vm_page_mapping
	objects, so they are handed to UnmapPages() wholesale. For all other
	areas the mapping list is taken over under the map lock and walked, so
	the hardware accessed/modified flags of each page can be transferred
	back to its vm_page while the translation entry is removed.

	\param area The area to unmap.
	\param deletingAddressSpace Whether the whole address space is currently
		being deleted (forwarded to UnmapPage()/UnmapPages()).
	\param ignoreTopCachePageFlags Whether the accessed/modified flags of
		pages in the area's top cache may be discarded. Only relevant when
		the address space is being deleted.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		// No per-page mapping objects to maintain -- just clear the
		// translation entries for the whole range.
		UnmapPages(area, area->Base(), area->Size(), true, deletingAddressSpace);
		return;
	}

	// When deleting the address space and the top cache's page flags may be
	// ignored, only pages belonging to lower caches still need to be
	// unmapped individually (to preserve their flags).
	const bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	Lock();

	// Take over the area's mapping list so it can be walked and later freed
	// without the area interfering.
	VMAreaMappings mappings;
	mappings.TakeFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
		vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			// this was the last mapping of the page
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			// Derive the page's virtual address in the area from its cache
			// offset.
			const addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			uint32 flags = 0;
			status_t status = UnmapPage(area, address, false, deletingAddressSpace, &flags);
			if (status == B_ENTRY_NOT_FOUND) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no translation map entry", page, area, address);
				continue;
			}
			if (status != B_OK) {
				panic("unmapping page %p for area %p (%#" B_PRIxADDR ") failed: %x",
					page, area, address, status);
				continue;
			}

			// Transfer the hardware accessed/modified flags to the page.
			if ((flags & PAGE_ACCESSED) != 0)
				page->accessed = true;
			if ((flags & PAGE_MODIFIED) != 0)
				page->modified = true;

			if (pageFullyUnmapped) {
				// The page is no longer mapped anywhere -- move it to a page
				// queue matching its state.
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}
	}

	Unlock();

	// Free the mapping objects only after the map lock was released.
	// NOTE(review): the flags below suggest freeing may touch the kernel
	// address space / allocator -- confirm that's why it is deferred.
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
}
/*!	Kernel-debugger hook: prints mapping information for \a virtualAddress.
	This base implementation only reports that it is not implemented;
	architecture-specific translation maps are expected to override it.
	Uses kprintf(), so it must only be called from the kernel debugger.
*/
void
VMTranslationMap::DebugPrintMappingInfo(addr_t virtualAddress)
{
#if KDEBUG
	kprintf("VMTranslationMap::DebugPrintMappingInfo not implemented\n");
#endif
}
/*!	Kernel-debugger hook: looks up virtual addresses mapping the given
	\a physicalAddress and reports them via \a callback.
	This base implementation only reports that it is not implemented and
	returns \c false; architecture-specific maps are expected to override it.
	\return \c false always in this base implementation.
*/
bool
VMTranslationMap::DebugGetReverseMappingInfo(phys_addr_t physicalAddress,
	ReverseMappingInfoCallback& callback)
{
#if KDEBUG
	kprintf("VMTranslationMap::DebugGetReverseMappingInfo not implemented\n");
#endif
	return false;
}
/*!	Updates the VM bookkeeping after a page was unmapped from this map.

	Transfers \a accessed/\a modified to the vm_page, removes the
	corresponding vm_page_mapping from the area's and page's lists (for
	B_NO_LOCK areas) resp. decrements the page's wired count (for wired
	areas), maintains gMappedPagesCount, and -- once the page is no longer
	mapped anywhere and \a updatePageQueue is \c true -- moves the page to a
	page queue matching its state.

	The map's recursive lock must be held on entry.

	\param area The area the page was mapped in.
	\param pageNumber The physical page number of the unmapped page.
	\param accessed Whether the hardware reported the page as accessed.
	\param modified Whether the hardware reported the page as modified.
	\param updatePageQueue Whether a fully unmapped page shall be moved to a
		matching page queue.
	\param mappingsQueue If \c NULL, the map's lock is released by this
		method and a removed mapping is freed immediately. Otherwise the
		lock is kept held and the mapping is appended to this queue for the
		caller to free later.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue, VMAreaMappings* mappingsQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		// Device areas have no vm_page/vm_page_mapping bookkeeping.
		if (mappingsQueue == NULL)
			recursive_lock_unlock(&fLock);
		return;
	}

	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	if (mappingsQueue != NULL) {
		// NOTE(review): in the queued case the caller apparently has not
		// marked the page for access debugging, so mark it here -- confirm.
		DEBUG_PAGE_ACCESS_START(page);
	} else {
		DEBUG_PAGE_ACCESS_CHECK(page);
	}

	// Transfer the hardware accessed/modified flags to the page.
	page->accessed |= accessed;
	page->modified |= modified;

	// Remove the mapping object resp. decrement the page's wired count.
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	if (mappingsQueue == NULL)
		recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		// The page is no longer mapped in any address space.
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mappingsQueue != NULL) {
		DEBUG_PAGE_ACCESS_END(page);
	}

	if (mapping != NULL) {
		if (mappingsQueue == NULL) {
			// Free immediately; don't wait for memory, and don't lock the
			// kernel address space if we are operating in it.
			bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
			vm_free_page_mapping(pageNumber, mapping,
				CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
		} else {
			// The caller will free the queued mappings later.
			mappingsQueue->Add(mapping);
		}
	}
}
/*!	Updates the VM bookkeeping after a page was unmapped that the hardware
	reported as neither accessed nor modified.

	Slimmer variant of PageUnmapped(): no page flags are transferred and the
	page is never moved to another page queue. Removes the mapping object
	(for B_NO_LOCK areas) resp. decrements the page's wired count, maintains
	gMappedPagesCount, and frees the removed mapping.

	The map's recursive lock must be held on entry; it is released before
	the mapping is freed.

	\param area The area the page was mapped in.
	\param pageNumber The physical page number of the unmapped page.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		// Device areas have no vm_page/vm_page_mapping bookkeeping.
		recursive_lock_unlock(&fLock);
		return;
	}

	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// Remove the mapping object resp. decrement the page's wired count.
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		// NOTE(review): unlike PageUnmapped(), CACHE_DONT_LOCK_KERNEL_SPACE
		// is passed unconditionally here -- confirm this is intended.
		vm_free_page_mapping(pageNumber, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
	}
}
/*!	Invalidates the user-address TLB entries on all CPUs in \a cpus.

	If CPUs other than the current one are in the set, a synchronous
	SMP_MSG_USER_INVALIDATE_PAGES ICI is sent; the current CPU is re-added
	to the set beforehand so the same message covers it as well.
	NOTE(review): this assumes smp_multicast_ici() also processes the
	message on the current CPU -- confirm against the SMP implementation.
	If only the current CPU is affected, its TLB is invalidated directly
	with interrupts disabled.

	\param cpus The set of CPUs whose TLBs shall be invalidated.
	\param context Architecture-specific context value (opaque here), passed
		through to the invalidation routine.
*/
void
VMTranslationMap::InvalidateUserTLB(CPUSet cpus, intptr_t context)
{
	int32 cpu = smp_get_current_cpu();
	const bool current = cpus.GetBit(cpu);
	cpus.ClearBit(cpu);

	if (!cpus.IsEmpty()) {
		// Other CPUs are affected: broadcast, including ourselves if needed.
		if (current)
			cpus.SetBit(cpu);
		smp_multicast_ici(cpus, SMP_MSG_USER_INVALIDATE_PAGES,
			context, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	} else if (current) {
		// Only the current CPU is affected: invalidate locally.
		cpu_status state = disable_interrupts();
		arch_cpu_user_tlb_invalidate(context);
		restore_interrupts(state);
	}
}
/*!	Invalidates the TLB entries for the given list of pages on all CPUs in
	\a cpus.

	If CPUs other than the current one are in the set, a synchronous
	SMP_MSG_INVALIDATE_PAGE_LIST ICI is sent; the current CPU is re-added to
	the set beforehand so the same message covers it as well (see the
	NOTE(review) in InvalidateUserTLB() regarding local delivery).
	If only the current CPU is affected, the list is invalidated directly.
	NOTE(review): unlike InvalidateUserTLB(), interrupts are not disabled
	around the local call -- confirm arch_cpu_invalidate_tlb_list() is safe
	with interrupts enabled.

	\param cpus The set of CPUs whose TLBs shall be invalidated.
	\param context Architecture-specific context value (opaque here).
	\param invalidPages The virtual addresses of the pages to invalidate.
	\param count The number of entries in \a invalidPages.
*/
void
VMTranslationMap::InvalidateTLBList(CPUSet cpus, intptr_t context,
	addr_t* invalidPages, int32 count)
{
	int32 cpu = smp_get_current_cpu();
	const bool current = cpus.GetBit(cpu);
	cpus.ClearBit(cpu);

	if (!cpus.IsEmpty()) {
		if (current)
			cpus.SetBit(cpu);
		smp_multicast_ici(cpus, SMP_MSG_INVALIDATE_PAGE_LIST,
			context, (addr_t)invalidPages, count, NULL,
			SMP_MSG_FLAG_SYNC);
	} else if (current) {
		arch_cpu_invalidate_tlb_list(context, invalidPages, count);
	}
}
/*!	Empty destructor, defined out of line (presumably to anchor the
	callback interface's vtable in this translation unit).
*/
VMTranslationMap::ReverseMappingInfoCallback::~ReverseMappingInfoCallback()
{
}
/*!	Intentionally empty constructor; VMPhysicalPageMapper is an abstract
	interface with no state of its own.
*/
VMPhysicalPageMapper::VMPhysicalPageMapper()
{
}
/*!	Intentionally empty destructor, defined out of line for the interface
	class.
*/
VMPhysicalPageMapper::~VMPhysicalPageMapper()
{
}