#include "paging/460/PPCVMTranslationMap460.h"
#include <stdlib.h>
#include <string.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <interrupts.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include "paging/460/PPCPagingMethod460.h"
#include "paging/460/PPCPagingStructures460.h"
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"
// Define TRACE_PPC_VM_TRANSLATION_MAP_460 to enable verbose mapping traces.
#ifdef TRACE_PPC_VM_TRANSLATION_MAP_460
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// Bitmap of allocated VSID bases, one bit per base. With B_PAGE_SIZE * 8
// bases, the bitmap itself occupies exactly B_PAGE_SIZE bytes. Guarded by
// sVSIDBaseBitmapLock (taken with interrupts disabled in Init()).
#define MAX_VSID_BASES (B_PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;

// A VSID base is a multiple of 8 (1 << VSID_BASE_SHIFT): each address space
// gets 8 consecutive VSIDs, one per 256 MB segment. VADDR_TO_VSID() adds the
// segment number (top 4 bits of the address) to the base.
#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
PPCVMTranslationMap460::PPCVMTranslationMap460()
{
	// The paging structures are allocated lazily in Init(); a NULL pointer
	// tells the destructor that Init() never ran (or failed early).
	fPagingStructures = NULL;
}
PPCVMTranslationMap460::~PPCVMTranslationMap460()
{
	// Init() never ran (or failed before allocating the structures):
	// nothing to tear down.
	if (fPagingStructures == NULL)
		return;

#if 0
	if (fPageMapper != NULL)
		fPageMapper->Delete();
#endif

	// A positive mapping count here means some mappings were never removed
	// before the map was destroyed — that's a caller bug.
	if (fMapCount > 0) {
		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
			this, fMapCount);
	}

	// Return this map's VSID base to the free pool. fVSIDBase is always a
	// multiple of 8 (see Init()), so shifting recovers the bitmap bit index.
	// atomic_and() suffices — no need to take sVSIDBaseBitmapLock to clear
	// a single bit.
	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
	atomic_and((int32 *)&sVSIDBaseBitmap[baseBit / 32],
		~(1 << (baseBit % 32)));

#if 0
	// this sanity check can be enabled when corruption due to
	// overwriting an active page directory is suspected
	if (fPagingStructures->pgdir_virt != NULL) {
		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
			i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
			if ((fPagingStructures->pgdir_virt[i] & PPC_PDE_PRESENT) != 0) {
				addr_t address = fPagingStructures->pgdir_virt[i]
					& PPC_PDE_ADDRESS_MASK;
				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
				if (!page)
					panic("destroy_tmap: didn't find pgtable page\n");
				DEBUG_PAGE_ACCESS_START(page);
				vm_page_free_etc(NULL, page, &reservation);
			}
		}
	}
#endif

	// Drop our reference; the structures delete themselves when unused.
	fPagingStructures->RemoveReference();
}
/*!	Second-stage initialization: allocates a VSID base for this address
	space and creates the paging structures.
	\param kernel \c true for the (single) kernel translation map
	\return \c B_OK on success, \c B_NO_MEMORY if the paging structures
		could not be allocated
*/
status_t
PPCVMTranslationMap460::Init(bool kernel)
{
	TRACE("PPCVMTranslationMap460::Init()\n");

	PPCVMTranslationMap::Init(kernel);

	// The bitmap is shared with all CPUs and may be touched with interrupts
	// disabled, hence spinlock + disabled interrupts.
	cpu_status state = disable_interrupts();
	acquire_spinlock(&sVSIDBaseBitmapLock);

	// allocate a VSID base for this map
	if (kernel) {
		// The kernel fixedly claims the first two VSID bases (bits 0 and 1).
		fVSIDBase = 0;
		sVSIDBaseBitmap[0] |= 0x3;
	} else {
		int i = 0;

		// Linear scan for the first clear bit, i.e. the first free base.
		while (i < MAX_VSID_BASES) {
			// Fast-path: skip bitmap words that are completely used.
			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
				i += 32;
				continue;
			}
			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
				// found a free slot — claim it
				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
				break;
			}
			i++;
		}
		if (i >= MAX_VSID_BASES)
			panic("vm_translation_map_create: out of VSID bases\n");
		// Each base covers 8 consecutive VSIDs (one per 256 MB segment).
		fVSIDBase = i << VSID_BASE_SHIFT;
	}

	release_spinlock(&sVSIDBaseBitmapLock);
	restore_interrupts(state);

	fPagingStructures = new(std::nothrow) PPCPagingStructures460;
	if (fPagingStructures == NULL)
		return B_NO_MEMORY;

	PPCPagingMethod460* method = PPCPagingMethod460::Method();

	if (!kernel) {
		// user: both kernel and user maps share the one global hashed page
		// table (mappings are distinguished by VSID, not by separate tables)
#if 0
		// allocate a physical page mapper
		status_t error = method->PhysicalPageMapper()
			->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
		if (error != B_OK)
			return error;
#endif
#if 0
		// allocate the page directory
		page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
			B_PAGE_SIZE, B_PAGE_SIZE);
		if (virtualPageDir == NULL)
			return B_NO_MEMORY;

		// look up the page directory's physical address
		phys_addr_t physicalPageDir;
		vm_get_page_mapping(VMAddressSpace::KernelID(),
			(addr_t)virtualPageDir, &physicalPageDir);
#endif

		fPagingStructures->Init(
			method->PageTable());
	} else {
		// kernel
#if 0
		// get the physical page mapper
		fPageMapper = method->KernelPhysicalPageMapper();
#endif

		// the kernel uses the same global page table
		fPagingStructures->Init(
			method->PageTable());
	}

	return B_OK;
}
/*!	Activates this map's address space by loading its VSIDs into the eight
	user segment registers (segments 0-7, the lower 2 GB). The kernel
	segments (8-15) are left untouched — hence the KERNEL_BASE check below.
*/
void
PPCVMTranslationMap460::ChangeASID()
{
// this code depends on the kernel segments starting at 0x80000000
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int vsidBase = VSIDBase();

	isync();	// synchronize context before touching segment registers
	asm("mtsr 0,%0" : : "g"(vsidBase));
	asm("mtsr 1,%0" : : "g"(vsidBase + 1));
	asm("mtsr 2,%0" : : "g"(vsidBase + 2));
	asm("mtsr 3,%0" : : "g"(vsidBase + 3));
	asm("mtsr 4,%0" : : "g"(vsidBase + 4));
	asm("mtsr 5,%0" : : "g"(vsidBase + 5));
	asm("mtsr 6,%0" : : "g"(vsidBase + 6));
	asm("mtsr 7,%0" : : "g"(vsidBase + 7));
	isync();	// synchronize context after the update
}
/*!	Looks up the page table entry mapping \a virtualAddress in the hashed
	page table, probing first the primary and then the secondary PTE group.
	\return the matching entry, or \c NULL if the address is not mapped
*/
page_table_entry *
PPCVMTranslationMap460::LookupPageTableEntry(addr_t virtualAddress)
{
	// An entry matches on (VSID, H bit, abbreviated page index). The API is
	// the top 6 bits of the 16-bit page index, i.e. EA bits 10-15.
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
	uint32 abbreviatedPageIndex = (virtualAddress >> 22) & 0x3f;

	PPCPagingMethod460* method = PPCPagingMethod460::Method();
	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID,
		virtualAddress);

	for (int pass = 0; pass < 2; pass++) {
		bool secondary = (pass != 0);
		if (secondary)
			hash = page_table_entry::SecondaryHash(hash);

		page_table_entry_group *group
			= &(method->PageTable())[hash & method->PageTableHashMask()];

		for (int i = 0; i < 8; i++) {
			page_table_entry *entry = &group->entry[i];
			if (entry->virtual_segment_id == virtualSegmentID
				&& entry->secondary_hash == secondary
				&& entry->abbr_page_index == abbreviatedPageIndex)
				return entry;
		}
	}

	return NULL;
}
/*!	Invalidates the page table entry for \a virtualAddress, if one exists,
	and flushes the stale translation from all CPUs' TLBs.
	\return \c true if an entry was found and removed, \c false otherwise
*/
bool
PPCVMTranslationMap460::RemovePageTableEntry(addr_t virtualAddress)
{
	if (page_table_entry* entry = LookupPageTableEntry(virtualAddress)) {
		// Clear the valid bit first, make the store visible, then
		// broadcast the TLB invalidation (tlbie/tlbsync protocol).
		entry->valid = 0;
		ppc_sync();
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
		return true;
	}

	return false;
}
/*!	Returns the number of pages that must be reserved to map the given
	range. Always 0 here: the hashed page table is preallocated, so Map()
	never needs to allocate per-mapping page-table pages.
*/
size_t
PPCVMTranslationMap460::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	return 0;
}
/*!	Inserts a mapping virtualAddress -> physicalAddress into the hashed page
	table. The entry goes into the first free slot of the primary PTE group;
	if that group is full, the secondary group is tried (with the PTE's
	secondary-hash bit set accordingly, so lookups can find it again).

	\param virtualAddress page-aligned virtual address to map
	\param physicalAddress page-aligned physical address to map to
	\param attributes protection flags (B_READ_AREA/B_WRITE_AREA grant user
		access; otherwise the mapping is kernel-only)
	\param memoryType caching/memory type for the mapping
	\param reservation unused — no page-table pages are ever allocated here
		(see MaxPagesNeededToMap())
	\return \c B_OK on success, \c B_ERROR if both PTE groups are full
		(currently panics in that case; no eviction is implemented)
*/
status_t
PPCVMTranslationMap460::Map(addr_t virtualAddress,
	phys_addr_t physicalAddress, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	// Fixed: this TRACE() used the non-existent locals "pa"/"va", breaking
	// the build whenever TRACE_PPC_VM_TRANSLATION_MAP_460 was defined.
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", physicalAddress,
		virtualAddress);

	// look up the VSID for this address
	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);

	// User-accessible mappings get read-only or read/write protection;
	// everything else stays kernel-only (protection 0).
	uint32 protection = 0;
	if (attributes & (B_READ_AREA | B_WRITE_AREA))
		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;

	PPCPagingMethod460* m = PPCPagingMethod460::Method();

	// try to insert the mapping into the primary PTE group
	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
	page_table_entry_group *group = &(m->PageTable())[hash & m->PageTableHashMask()];
	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];
		if (entry->valid)
			continue;

		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
			physicalAddress, protection, memoryType, false);
		fMapCount++;
		return B_OK;
	}

	// primary group full — fall back to the secondary PTE group
	hash = page_table_entry::SecondaryHash(hash);
	group = &(m->PageTable())[hash & m->PageTableHashMask()];
	for (int i = 0; i < 8; i++) {
		page_table_entry *entry = &group->entry[i];
		if (entry->valid)
			continue;

		// Fixed: the secondary-hash flag must be true here. With false the
		// PTE's H bit didn't match the hash used to place it, so
		// LookupPageTableEntry() (which requires secondary_hash == true in
		// its secondary probe) could never find or remove the entry.
		m->FillPageTableEntry(entry, virtualSegmentID, virtualAddress,
			physicalAddress, protection, memoryType, true);
		fMapCount++;
		return B_OK;
	}

	// TODO: evict a random entry from one of the groups instead of failing
	panic("vm_translation_map.map_tmap: hash table full\n");
	return B_ERROR;

#if 0
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		PPCPagingMethod460::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			PPCPagingStructures460::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the pentry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & PPC_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & PPC_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	PPCPagingMethod460::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
#endif
}
/*!	Removes all mappings in the page-aligned range [start, end).
	\return always \c B_OK (missing mappings are silently skipped)
*/
status_t
PPCVMTranslationMap460::Unmap(addr_t start, addr_t end)
{
	// Align the range outward; an empty range is a successful no-op.
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	end = ROUNDUP(end, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	// Only decrement the map count for pages that were actually mapped.
	for (addr_t address = start; address < end; address += B_PAGE_SIZE) {
		if (RemovePageTableEntry(address))
			fMapCount--;
	}

	return B_OK;

#if 0
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & PPC_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if ((pt[index] & PPC_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("unmap_tmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= PPCPagingMethod460::ClearPageTableEntryFlags(&pt[index],
					PPC_PTE_PRESENT);
			fMapCount--;

			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
#endif
}
/*!	Moves an existing (physically contiguous, as assumed from its first
	page — TODO confirm for multi-page ranges) mapping to a freshly
	reserved kernel address range.

	\param _virtualAddress in: the current virtual address; out: the new one
	\param size size of the range in bytes
	\param unmap whether to remove the old mapping afterwards
	\return \c B_OK on success, an error code otherwise

	Fixed: on the two failure paths after vm_reserve_address_range()
	succeeded, the reserved range was leaked; it is now released again.
*/
status_t
PPCVMTranslationMap460::RemapAddressRange(addr_t *_virtualAddress,
	size_t size, bool unmap)
{
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (!entry) {
		// release the reservation again, or it would be leaked
		vm_unreserve_address_range(addressSpace->ID(), newAddress, size);
		return B_ERROR;
	}
	phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;

	// map the pages at the new virtual address
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK) {
		vm_unreserve_address_range(addressSpace->ID(), newAddress, size);
		return error;
	}

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}
/*!	Removes the mapping of a single page and updates the page bookkeeping.

	\param area the area the page belongs to
	\param address page-aligned virtual address to unmap
	\param updatePageQueue whether PageUnmapped() should requeue the page
	\param deletingAddressSpace unused in this implementation
	\param _flags if non-NULL, receives the old PAGE_* flags instead of the
		full PageUnmapped() bookkeeping (mutually exclusive with
		\a updatePageQueue, see the ASSERT)
	\return \c B_OK, or \c B_ENTRY_NOT_FOUND if nothing was mapped there
*/
status_t
PPCVMTranslationMap460::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue, bool deletingAddressSpace, uint32* _flags)
{
	ASSERT(address % B_PAGE_SIZE == 0);
	ASSERT(_flags == NULL || !updatePageQueue);

	RecursiveLocker locker(fLock);

	// Device mappings have no vm_page behind them — just drop the PTE.
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		if (!RemovePageTableEntry(address))
			return B_ENTRY_NOT_FOUND;

		fMapCount--;
		return B_OK;
	}

	page_table_entry* entry = LookupPageTableEntry(address);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	// Snapshot page number and referenced/changed (R/C) bits before the
	// entry is invalidated.
	page_num_t pageNumber = entry->physical_page_number;
	bool accessed = entry->referenced;
	bool modified = entry->changed;

	RemovePageTableEntry(address);

	fMapCount--;

	if (_flags == NULL) {
		// PageUnmapped() takes over and releases the lock — hence Detach().
		locker.Detach();

		PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);
	} else {
		// Caller only wants the old flags; skip the page bookkeeping.
		uint32 flags = PAGE_PRESENT;
		if (accessed)
			flags |= PAGE_ACCESSED;
		if (modified)
			flags |= PAGE_MODIFIED;
		*_flags = flags;
	}

	return B_OK;

#if 0
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("PPCVMTranslationMap460::UnmapPage(%#" B_PRIxADDR ")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & PPC_PDE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & PPC_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);
	page_table_entry oldEntry = PPCPagingMethod460::ClearPageTableEntry(
		&pt[index]);

	pinner.Unlock();

	if ((oldEntry & PPC_PTE_PRESENT) == 0) {
		return B_ENTRY_NOT_FOUND;
	}

	fMapCount--;

	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
		InvalidatePage(address);
		Flush();
	}

	locker.Detach();
	PageUnmapped(area, (oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & PPC_PTE_ACCESSED) != 0, (oldEntry & PPC_PTE_DIRTY) != 0,
		updatePageQueue);

	return B_OK;
#endif
}
/*!	Bulk-unmaps a range of pages of an area.
	Not yet implemented for this (hash-table based) map — always panics.
	The disabled code below is the x86-style page-directory implementation
	kept for reference.
*/
void
PPCVMTranslationMap460::UnmapPages(VMArea* area, addr_t base, size_t size,
	bool updatePageQueue, bool deletingAddressSpace)
{
	panic("%s: UNIMPLEMENTED", __FUNCTION__);

#if 0
	if (size == 0)
		return;

	addr_t start = base;
	addr_t end = base + size - 1;

	TRACE("PPCVMTranslationMap460::UnmapPages(%p, %#" B_PRIxADDR ", %#"
		B_PRIxADDR ")\n", area, start, end);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	VMAreaMappings queue;

	RecursiveLocker locker(fLock);

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & PPC_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
				index++, start += B_PAGE_SIZE) {
			page_table_entry oldEntry
				= PPCPagingMethod460::ClearPageTableEntry(&pt[index]);
			if ((oldEntry & PPC_PTE_PRESENT) == 0)
				continue;

			fMapCount--;

			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flags was set, since only then the entry could have
				// been in any TLB.
				if (!deletingAddressSpace)
					InvalidatePage(start);
			}

			if (area->cache_type != CACHE_TYPE_DEVICE) {
				page_num_t page = (oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE;
				PageUnmapped(area, page,
					(oldEntry & PPC_PTE_ACCESSED) != 0,
					(oldEntry & PPC_PTE_DIRTY) != 0,
					updatePageQueue, &queue);
			}
		}

		Flush();
			// flush explicitly, since we directly use the lock
	} while (start != 0 && start < end);

	locker.Unlock();

	// free removed mappings
	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = queue.RemoveHead())
		vm_free_page_mapping(mapping->page->physical_page_number, mapping, freeFlags);
#endif
}
/*!	Looks up the mapping for \a va and reports its physical address and
	PAGE_*/protection flags.

	Note: returns \c B_NO_ERROR (== B_OK) even when no entry exists; the
	caller must check *_outFlags for PAGE_PRESENT instead.
*/
status_t
PPCVMTranslationMap460::Query(addr_t va, phys_addr_t *_outPhysical,
	uint32 *_outFlags)
{
	page_table_entry *entry;

	// default the flags to not present
	*_outFlags = 0;
	*_outPhysical = 0;

	entry = LookupPageTableEntry(va);
	if (entry == NULL)
		return B_NO_ERROR;

	// Synthesize area protection flags from the PTE's protection field.
	// Kernel addresses: kernel read always, write unless read-only.
	// User addresses: kernel r/w plus user read, user write unless read-only.
	if (IS_KERNEL_ADDRESS(va))
		*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
	else
		*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);

	// Translate the PTE's changed/referenced/valid bits to PAGE_* flags.
	*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
	*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
	*_outFlags |= entry->valid ? PAGE_PRESENT : 0;

	*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;

	return B_OK;

#if 0
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & PPC_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	*_physical = entry & PPC_PDE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & PPC_PTE_USER) != 0) {
		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
#endif
}
/*!	Interrupt-safe variant of Query().
	Simply delegates: this implementation's Query() only walks the
	preallocated hash table (no locks, no allocations), so it is usable
	from interrupt context as well. The explicit class qualification makes
	the call non-virtual.
*/
status_t
PPCVMTranslationMap460::QueryInterrupt(addr_t virtualAddress,
	phys_addr_t *_physicalAddress, uint32 *_flags)
{
	return PPCVMTranslationMap460::Query(virtualAddress, _physicalAddress, _flags);

#if 0
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)PPCPagingMethod460::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & PPC_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	*_physical = entry & PPC_PDE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & PPC_PTE_USER) != 0) {
		*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & PPC_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & PPC_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & PPC_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & PPC_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);

	return B_OK;
#endif
}
/*!	Changes the protection of all mappings in [start, end).
	TODO: not yet implemented for the hashed page table — currently always
	fails with \c B_ERROR. The disabled code below is the x86-style
	page-directory implementation kept for reference.
*/
status_t
PPCVMTranslationMap460::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	return B_ERROR;

#if 0
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);

	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = PPC_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= PPC_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = PPC_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & PPC_PDE_PRESENT) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & PPC_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & PPC_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = PPCPagingMethod460::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(PPC_PTE_PROTECTION_MASK
							| PPC_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| PPCPagingMethod460::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
#endif
}
/*!	Clears the requested PAGE_ACCESSED / PAGE_MODIFIED flags (the PTE's
	referenced/changed bits) on the mapping for \a virtualAddress.
	Returns \c B_NO_ERROR even when no mapping exists.
*/
status_t
PPCVMTranslationMap460::ClearFlags(addr_t virtualAddress, uint32 flags)
{
	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
	if (entry == NULL)
		return B_NO_ERROR;

	bool modified = false;

	// clear the requested R/C bits, remembering whether anything changed
	if (flags & PAGE_MODIFIED && entry->changed) {
		entry->changed = false;
		modified = true;
	}
	if (flags & PAGE_ACCESSED && entry->referenced) {
		entry->referenced = false;
		modified = true;
	}

	// invalidate the TLB entry so the cleared bits take effect; the
	// tlbie/eieio/tlbsync/sync order is mandated by the architecture
	if (modified) {
		tlbie(virtualAddress);
		eieio();
		tlbsync();
		ppc_sync();
	}

	return B_OK;

#if 0
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & PPC_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? PPC_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? PPC_PTE_ACCESSED : 0);

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & PPC_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= PPCPagingMethod460::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
#endif
}
/*!	Reads and clears the accessed/modified state of the page mapped at
	\a address, optionally unmapping it when it was not accessed.

	Generic implementation built on Query()/ClearFlags()/UnmapPage().

	\param area the area the page belongs to
	\param address page-aligned virtual address
	\param unmapIfUnaccessed if \c true and the page was not accessed, the
		mapping is removed
	\param _modified set to whether the page was modified
	\return whether the page was accessed
*/
bool
PPCVMTranslationMap460::ClearAccessedAndModified(VMArea* area,
	addr_t address, bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	RecursiveLocker locker(fLock);

	uint32 flags;
	phys_addr_t physicalAddress;
	// Query() reports flags == 0 (not PAGE_PRESENT) for missing mappings.
	if (Query(address, &physicalAddress, &flags) != B_OK
		|| (flags & PAGE_PRESENT) == 0) {
		return false;
	}

	_modified = (flags & PAGE_MODIFIED) != 0;

	// clear whichever of the two flags were set
	if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
		ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));

	if ((flags & PAGE_ACCESSED) != 0)
		return true;

	if (!unmapIfUnaccessed)
		return false;

	// The page was not accessed: remove the mapping. UnmapPage() takes the
	// lock itself, so release ours first. (Remaining parameters use their
	// defaults — presumably declared in the header; confirm there.)
	locker.Unlock();

	UnmapPage(area, address, false);

	// Re-check the vm_page itself: another map may have seen accesses.
	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
	if (page == NULL)
		return false;

	_modified |= page->modified;

	return page->accessed;

#if 0
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("PPCVMTranslationMap460::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & PPC_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & PPC_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & PPC_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & PPC_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = PPCPagingMethod460::ClearPageTableEntryFlags(
					&pt[index], PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (PPCPagingMethod460::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = PPCPagingMethod460::ClearPageTableEntryFlags(&pt[index],
			PPC_PTE_ACCESSED | PPC_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & PPC_PTE_DIRTY) != 0;

	if ((oldEntry & PPC_PTE_ACCESSED) != 0) {
		// Note, that we only need to invalidate the address, if the
		// accessed flags was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);
		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & PPC_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
#endif
}
/*!	Returns this map's paging structures (may be \c NULL before Init()). */
PPCPagingStructures*
PPCVMTranslationMap460::PagingStructures() const
{
	return fPagingStructures;
}