#include <vm/VMCache.h>
#include <stddef.h>
#include <stdlib.h>
#include <algorithm>
#include <arch/cpu.h>
#include <condition_variable.h>
#include <heap.h>
#include <interrupts.h>
#include <kernel.h>
#include <slab/Slab.h>
#include <smp.h>
#include <thread.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include "VMAnonymousCache.h"
#include "VMAnonymousNoSwapCache.h"
#include "VMDeviceCache.h"
#include "VMNullCache.h"
#include "../cache/vnode_store.h"
// Debug tracing for this file; enabled by defining TRACE_VM_CACHE.
#ifdef TRACE_VM_CACHE
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#if DEBUG_CACHE_LIST
// Head of a doubly-linked list of all live caches (debug builds only),
// maintained in VMCache::Init()/Delete() under sCacheListLock.
VMCache* gDebugCacheList;
#endif
// Guards the debug cache list and the page -> cache-ref association
// (see vm_cache_acquire_locked_page_cache() and MoveAllPages()).
static rw_lock sCacheListLock = RW_LOCK_INITIALIZER("global VMCache list");

// Slab object caches for VMCacheRef and the individual VMCache subclasses,
// created in vm_cache_init().
ObjectCache* gCacheRefObjectCache;
#if ENABLE_SWAP_SUPPORT
ObjectCache* gAnonymousCacheObjectCache;
#endif
ObjectCache* gAnonymousNoSwapCacheObjectCache;
ObjectCache* gVnodeCacheObjectCache;
ObjectCache* gDeviceCacheObjectCache;
ObjectCache* gNullCacheObjectCache;

// One waiter in the singly-linked list of threads blocked in
// WaitForPageEvents(); nodes live on the waiting thread's stack.
struct VMCache::PageEventWaiter {
	Thread*				thread;		// blocked thread
	PageEventWaiter*	next;		// next waiter in fPageEventWaiters
	vm_page*			page;		// page whose events are awaited
	uint32				events;		// PAGE_EVENT_* mask being waited for
};
#if VM_CACHE_TRACING

// Trace entries recording VMCache life-cycle and page/area operations.
// Each entry optionally captures a kernel stack trace
// (VM_CACHE_TRACING_STACK_TRACE levels deep).
namespace VMCacheTracing {

// Common base: remembers the cache the operation applied to.
class VMCacheTraceEntry : public AbstractTraceEntry {
	public:
		VMCacheTraceEntry(VMCache* cache)
			:
			fCache(cache)
		{
#if VM_CACHE_TRACING_STACK_TRACE
			fStackTrace = capture_tracing_stack_trace(
				VM_CACHE_TRACING_STACK_TRACE, 0, true);
				// initialized on construction
#endif
		}

#if VM_CACHE_TRACING_STACK_TRACE
		virtual void DumpStackTrace(TraceOutput& out)
		{
			out.PrintStackTrace(fStackTrace);
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache*	fCache;
#if VM_CACHE_TRACING_STACK_TRACE
		tracing_stack_trace* fStackTrace;
#endif
};

// Cache created.
class Create : public VMCacheTraceEntry {
	public:
		Create(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache create: -> cache: %p", fCache);
		}
};

// Cache deleted.
class Delete : public VMCacheTraceEntry {
	public:
		Delete(VMCache* cache)
			:
			VMCacheTraceEntry(cache)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache delete: cache: %p", fCache);
		}
};

// SetMinimalCommitment() called; records old and requested commitment.
class SetMinimalCommitment : public VMCacheTraceEntry {
	public:
		SetMinimalCommitment(VMCache* cache, off_t commitment)
			:
			VMCacheTraceEntry(cache),
			fOldCommitment(cache->Commitment()),
			fCommitment(commitment)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache set min commitment: cache: %p, "
				"commitment: %" B_PRIdOFF " -> %" B_PRIdOFF, fCache,
				fOldCommitment, fCommitment);
		}

	private:
		off_t	fOldCommitment;
		off_t	fCommitment;
};

// Resize() called; records old and new virtual_end.
class Resize : public VMCacheTraceEntry {
	public:
		Resize(VMCache* cache, off_t size)
			:
			VMCacheTraceEntry(cache),
			fOldSize(cache->virtual_end),
			fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache resize: cache: %p, size: %" B_PRIdOFF " -> %"
				B_PRIdOFF, fCache, fOldSize, fSize);
		}

	private:
		off_t	fOldSize;
		off_t	fSize;
};

// Rebase() called; records old and new virtual_base.
class Rebase : public VMCacheTraceEntry {
	public:
		Rebase(VMCache* cache, off_t base)
			:
			VMCacheTraceEntry(cache),
			fOldBase(cache->virtual_base),
			fBase(base)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache rebase: cache: %p, base: %lld -> %lld", fCache,
				fOldBase, fBase);
		}

	private:
		off_t	fOldBase;
		off_t	fBase;
};

// Consumer cache attached (Consumer() used by cache_stack debugger command).
class AddConsumer : public VMCacheTraceEntry {
	public:
		AddConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache add consumer: cache: %p, consumer: %p", fCache,
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache*	fConsumer;
};

// Consumer cache detached.
class RemoveConsumer : public VMCacheTraceEntry {
	public:
		RemoveConsumer(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};

// Cache merged into its only consumer.
class Merge : public VMCacheTraceEntry {
	public:
		Merge(VMCache* cache, VMCache* consumer)
			:
			VMCacheTraceEntry(cache),
			fConsumer(consumer)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache merge with consumer: cache: %p, consumer: %p",
				fCache, fConsumer);
		}

	private:
		VMCache*	fConsumer;
};

// Area inserted (Area() used by cache_stack debugger command).
class InsertArea : public VMCacheTraceEntry {
	public:
		InsertArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert area: cache: %p, area: %p", fCache,
				fArea);
		}

		VMArea* Area() const
		{
			return fArea;
		}

	private:
		VMArea*	fArea;
};

// Area removed.
class RemoveArea : public VMCacheTraceEntry {
	public:
		RemoveArea(VMCache* cache, VMArea* area)
			:
			VMCacheTraceEntry(cache),
			fArea(area)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove area: cache: %p, area: %p", fCache,
				fArea);
		}

	private:
		VMArea*	fArea;
};

}	// namespace VMCacheTracing

#	define T(x) new(std::nothrow) VMCacheTracing::x;

// Level >= 2 additionally traces individual page insertions/removals.
#	if VM_CACHE_TRACING >= 2

namespace VMCacheTracing {

// Page inserted into a cache at the given offset.
class InsertPage : public VMCacheTraceEntry {
	public:
		InsertPage(VMCache* cache, vm_page* page, off_t offset)
			:
			VMCacheTraceEntry(cache),
			fPage(page),
			fOffset(offset)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache insert page: cache: %p, page: %p, offset: %"
				B_PRIdOFF, fCache, fPage, fOffset);
		}

	private:
		vm_page*	fPage;
		off_t		fOffset;
};

// Page removed from a cache.
class RemovePage : public VMCacheTraceEntry {
	public:
		RemovePage(VMCache* cache, vm_page* page)
			:
			VMCacheTraceEntry(cache),
			fPage(page)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("vm cache remove page: cache: %p, page: %p", fCache,
				fPage);
		}

	private:
		vm_page*	fPage;
};

}	// namespace VMCacheTracing

#		define T2(x) new(std::nothrow) VMCacheTracing::x;
#	else
#		define T2(x) ;
#	endif
#else
#	define T(x) ;
#	define T2(x) ;
#endif
#if VM_CACHE_TRACING

/*!	Walks the trace entries backwards from \a baseIterator looking for the
	InsertArea entry that inserted \a area, and returns the cache it was
	inserted into, or \c NULL if none is found.
*/
static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// iterate backwards through the whole trace buffer
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


/*!	Walks the trace entries backwards from \a baseIterator to find the cache
	\a cache was a consumer of at that point in time.  Returns \c NULL when
	the cache's Create entry is reached first (i.e. it had no source then).
*/
static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			// the cache was just created -- no source cache before this
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


/*!	Debugger command "cache_stack": lists the chain of source caches of a
	cache (or of an area's cache, with the "area" flag) at the point in time
	given by a tracing entry index.
*/
static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// optional leading "area" argument
	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	// position the iterator at the requested trace entry
	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);
	if (isArea) {
		// resolve the area to its cache first
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	// follow the consumer -> source chain
	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}

#endif	// VM_CACHE_TRACING
/*!	Creates the slab object caches used for VMCacheRef and all VMCache
	subclasses.  Called once during VM initialization.
	\return \c B_OK on success; panics and returns \c B_NO_MEMORY if any
		object cache could not be created.
*/
status_t
vm_cache_init(kernel_args* args)
{
	// Create object caches for the structures we allocate here.
	gCacheRefObjectCache = create_object_cache("cache refs", sizeof(VMCacheRef),
		0);
#if ENABLE_SWAP_SUPPORT
	gAnonymousCacheObjectCache = create_object_cache("anon caches",
		sizeof(VMAnonymousCache), 0);
#endif
	gAnonymousNoSwapCacheObjectCache = create_object_cache(
		"anon no-swap caches", sizeof(VMAnonymousNoSwapCache), 0);
	gVnodeCacheObjectCache = create_object_cache("vnode caches",
		sizeof(VMVnodeCache), 0);
	gDeviceCacheObjectCache = create_object_cache("device caches",
		sizeof(VMDeviceCache), 0);
	gNullCacheObjectCache = create_object_cache("null caches",
		sizeof(VMNullCache), 0);

	if (gCacheRefObjectCache == NULL
#if ENABLE_SWAP_SUPPORT
		|| gAnonymousCacheObjectCache == NULL
#endif
		|| gAnonymousNoSwapCacheObjectCache == NULL
		|| gVnodeCacheObjectCache == NULL
		|| gDeviceCacheObjectCache == NULL
		|| gNullCacheObjectCache == NULL) {
		panic("vm_cache_init(): Failed to create object caches!");
		return B_NO_MEMORY;
	}

	return B_OK;
}


/*!	Post-heap initialization: registers the "cache_stack" debugger command
	when cache tracing is compiled in.
*/
void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif	// VM_CACHE_TRACING
}
/*!	Returns the cache \a page currently belongs to, locked and with a
	reference acquired, or \c NULL if the page belongs to no cache.
	If \a dontWait is \c true, only a TryLock() is attempted and \c NULL is
	returned when the cache lock is contended.  Otherwise the function may
	block and retries until the page's cache association is stable.
*/
VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
	rw_lock_read_lock(&sCacheListLock);

	while (true) {
		VMCacheRef* cacheRef = page->CacheRef();
		if (cacheRef == NULL) {
			// the page belongs to no cache (anymore)
			rw_lock_read_unlock(&sCacheListLock);
			return NULL;
		}

		VMCache* cache = cacheRef->cache;
		if (dontWait) {
			// non-blocking: give up immediately on contention
			if (!cache->TryLock()) {
				rw_lock_read_unlock(&sCacheListLock);
				return NULL;
			}
		} else {
			// SwitchFromReadLock() drops the list lock while acquiring the
			// cache lock; on failure re-acquire the list lock and retry
			if (!cache->SwitchFromReadLock(&sCacheListLock)) {
				rw_lock_read_lock(&sCacheListLock);
				continue;
			}
			rw_lock_read_lock(&sCacheListLock);
		}

		// The page may have been moved to another cache while we were
		// acquiring the cache lock -- verify before returning.
		if (cache == page->Cache()) {
			rw_lock_read_unlock(&sCacheListLock);
			cache->AcquireRefLocked();
			return cache;
		}

		// the cache is not the page's cache anymore -- retry
		cache->Unlock();
	}
}
// #pragma mark - VMCacheRef


/*!	Creates a cache reference pointing at \a cache.  The referenced cache may
	later be exchanged (see VMCache::MoveAllPages()).
*/
VMCacheRef::VMCacheRef(VMCache* cache)
	:
	cache(cache)
{
}
/*!	Returns whether this cache may be merged into its consumer: it must back
	no areas, be temporary, and have exactly one consumer.
*/
bool
VMCache::_IsMergeable() const
{
	if (!areas.IsEmpty() || !temporary)
		return false;
	if (consumers.IsEmpty())
		return false;

	// exactly one consumer: the list's head and tail are the same element
	return consumers.Head() == consumers.Tail();
}
/*!	Constructs an uninitialized cache; Init() must be called before use.
*/
VMCache::VMCache()
	:
	fCacheRef(NULL)
{
}


/*!	Destroys the cache.  By now all references must be gone and all pages
	removed; the associated VMCacheRef is returned to its object cache.
*/
VMCache::~VMCache()
{
	ASSERT(fRefCount == 0 && page_count == 0);
	object_cache_delete(gCacheRefObjectCache, fCacheRef);
}
/*!	Second-stage initialization of the cache.
	\param name Name for the cache's mutex.
	\param cacheType One of the CACHE_TYPE_* constants, stored in \c type.
	\param allocationFlags Heap flags used when allocating the VMCacheRef.
	\return \c B_OK, or \c B_NO_MEMORY if the VMCacheRef allocation failed.
*/
status_t
VMCache::Init(const char* name, uint32 cacheType, uint32 allocationFlags)
{
	mutex_init(&fLock, name);

	// start with one reference held by the creator
	fRefCount = 1;
	source = NULL;
	virtual_base = 0;
	virtual_end = 0;
	temporary = 0;
	page_count = 0;
	fWiredPagesCount = 0;
	fFaultCount = 0;
	fCopiedPagesCount = 0;
	type = cacheType;
	fPageEventWaiters = NULL;
#if DEBUG_CACHE_LIST
	debug_previous = NULL;
	debug_next = NULL;
#endif

	fCacheRef = new(gCacheRefObjectCache, allocationFlags) VMCacheRef(this);
	if (fCacheRef == NULL)
		return B_NO_MEMORY;

#if DEBUG_CACHE_LIST
	// prepend to the global debug list
	rw_lock_write_lock(&sCacheListLock);

	if (gDebugCacheList != NULL)
		gDebugCacheList->debug_previous = this;
	debug_next = gDebugCacheList;
	gDebugCacheList = this;

	rw_lock_write_unlock(&sCacheListLock);
#endif

	return B_OK;
}
/*!	Deletes the cache: frees all remaining pages, detaches from the source
	cache, removes the cache from the debug list and destroys it.  Must only
	be called when the cache has no more areas, consumers, or references.
*/
void
VMCache::Delete()
{
	if (!areas.IsEmpty())
		panic("cache %p to be deleted still has areas", this);
	if (!consumers.IsEmpty())
		panic("cache %p to be deleted still has consumers", this);
	if (!fRemovedBusyPages.IsEmpty())
		panic("cache %p to be deleted still has removed busy pages", this);

	T(Delete(this));

	// free all of the pages in the cache
	vm_page_reservation reservation = {};
	while (vm_page* page = pages.Root()) {
		if (!page->mappings.IsEmpty() || page->WiredCount() != 0) {
			panic("remove page %p from cache %p: page still has mappings!\n"
				"@!page %p; cache %p", page, this, page, this);
		}

		// remove it
		pages.Remove(page);
		page->SetCacheRef(NULL);
		page_count--;

		TRACE(("vm_cache_release_ref: freeing page 0x%lx\n",
			page->physical_page_number));
		DEBUG_PAGE_ACCESS_START(page);
		vm_page_free_etc(this, page, &reservation);
	}
	vm_page_unreserve_pages(&reservation);

	// remove the ref to the source
	if (source)
		source->_RemoveConsumer(this);

	// Note: fCacheRef is deleted in the destructor (see ~VMCache()).
	rw_lock_write_lock(&sCacheListLock);

#if DEBUG_CACHE_LIST
	// unlink from the global debug list
	if (debug_previous)
		debug_previous->debug_next = debug_next;
	if (debug_next)
		debug_next->debug_previous = debug_previous;
	if (this == gDebugCacheList)
		gDebugCacheList = debug_next;
#endif

	mutex_destroy(&fLock);

	rw_lock_write_unlock(&sCacheListLock);

	DeleteObject();
}
/*!	Unlocks the cache, merging it into its single consumer first when it has
	become mergeable, and deleting it when the last reference is gone.
	\param consumerLocked Whether the caller already holds the consumer's
		lock (avoids the lock-inversion dance below).
*/
void
VMCache::Unlock(bool consumerLocked)
{
	while (fRefCount == 1 && _IsMergeable()) {
		VMCache* consumer = consumers.Head();
		if (consumerLocked) {
			_MergeWithOnlyConsumer();
		} else if (consumer->TryLock()) {
			_MergeWithOnlyConsumer();
			consumer->Unlock();
		} else {
			// Someone else has locked the consumer ATM. Unlock this cache and
			// wait for the consumer lock. Holding a temporary reference keeps
			// this cache alive meanwhile.
			fRefCount++;
			bool consumerLockedTemp = consumer->SwitchLock(&fLock);
			Lock();
			fRefCount--;

			if (consumerLockedTemp) {
				// Recheck -- the situation may have changed while both locks
				// were dropped.
				if (fRefCount == 1 && _IsMergeable()
						&& consumer == consumers.Head()) {
					_MergeWithOnlyConsumer();
				}

				consumer->Unlock();
			}
		}
	}

	if (fRefCount == 0) {
		// delete this cache
		Delete();
	} else
		mutex_unlock(&fLock);
}
/*!	Returns the page at cache \a offset (byte offset, rounded down to a page
	boundary via the shift), or \c NULL if there is none.
	The cache must be locked.
*/
vm_page*
VMCache::LookupPage(off_t offset)
{
	AssertLocked();

	vm_page* page = pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));

#if KDEBUG
	if (page != NULL && page->Cache() != this)
		panic("page %p not in cache %p\n", page, this);
#endif

	return page;
}
/*!	Inserts \a page into the cache at byte \a offset, which must lie within
	[virtual_base, virtual_end).  The page must not belong to a cache yet.
	The cache must be locked.
*/
void
VMCache::InsertPage(vm_page* page, off_t offset)
{
	TRACE(("VMCache::InsertPage(): cache %p, page %p, offset %" B_PRIdOFF "\n",
		this, page, offset));
	T2(InsertPage(this, page, offset));
	AssertLocked();
	ASSERT(offset >= virtual_base && offset < virtual_end);

	if (page->CacheRef() != NULL) {
		panic("insert page %p into cache %p: page cache is set to %p\n",
			page, this, page->Cache());
	}

	page->cache_offset = (page_num_t)(offset >> PAGE_SHIFT);
	page_count++;
	page->SetCacheRef(fCacheRef);

#if KDEBUG
	// catch duplicate insertions at the same offset
	vm_page* otherPage = pages.Lookup(page->cache_offset);
	if (otherPage != NULL) {
		panic("VMCache::InsertPage(): there's already page %p with cache "
			"offset %" B_PRIuPHYSADDR " in cache %p; inserting page %p",
			otherPage, page->cache_offset, this, page);
	}
#endif	// KDEBUG

	pages.Insert(page);

	if (page->WiredCount() > 0)
		IncrementWiredPagesCount();
}
/*!	Frees a busy page that was previously removed from this cache and parked
	on fRemovedBusyPages (see _FreePageRange()).  Waiters for the page's
	NOT_BUSY event are woken first.  The cache must be locked.
*/
void
VMCache::FreeRemovedPage(vm_page* page)
{
	AssertLocked();

	NotifyPageEvents(page, PAGE_EVENT_NOT_BUSY);
	fRemovedBusyPages.Remove(page);

	vm_page_free(this, page);
}


/*!	Removes \a page from the cache (the page must currently belong to this
	cache).  The caller keeps responsibility for the page itself.
	The cache must be locked.
*/
void
VMCache::RemovePage(vm_page* page)
{
	TRACE(("VMCache::RemovePage(): cache %p, page %p\n", this, page));
	AssertLocked();

	if (page->Cache() != this) {
		panic("remove page %p from cache %p: page cache is set to %p\n", page,
			this, page->Cache());
	}

	T2(RemovePage(this, page));

	pages.Remove(page);
	page_count--;
	page->SetCacheRef(NULL);

	if (page->WiredCount() > 0)
		DecrementWiredPagesCount();
}
/*!	Moves \a page from its current cache into this one, placing it at byte
	\a offset, which must lie within this cache's [virtual_base, virtual_end).
	Both the old and the new cache must be locked.
*/
void
VMCache::MovePage(vm_page* page, off_t offset)
{
	VMCache* oldCache = page->Cache();

	AssertLocked();
	oldCache->AssertLocked();
	ASSERT(offset >= virtual_base && offset < virtual_end);

	// remove from old cache
	oldCache->pages.Remove(page);
	oldCache->page_count--;
	T2(RemovePage(oldCache, page));

	// insert here at the new offset
	page->cache_offset = offset >> PAGE_SHIFT;
	pages.Insert(page);
	page_count++;
	page->SetCacheRef(fCacheRef);

	if (page->WiredCount() > 0) {
		// keep the per-cache wired-pages counters in sync
		IncrementWiredPagesCount();
		oldCache->DecrementWiredPagesCount();
	}

	T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
}


/*!	Moves \a page from its current cache into this cache, keeping its current
	cache offset.  Both caches must be locked.
*/
void
VMCache::MovePage(vm_page* page)
{
	MovePage(page, page->cache_offset << PAGE_SHIFT);
}
/*!	Moves all pages from \a fromCache into this cache by swapping the page
	trees and exchanging the VMCacheRef objects, so existing per-page cache
	references remain valid.  This cache must be empty; both caches must be
	locked.
*/
void
VMCache::MoveAllPages(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(page_count == 0);

	// swap the whole page tree and take over the counters
	std::swap(fromCache->pages, pages);
	page_count = fromCache->page_count;
	fromCache->page_count = 0;
	fWiredPagesCount = fromCache->fWiredPagesCount;
	fromCache->fWiredPagesCount = 0;

	// swap the VMCacheRefs so that the pages' existing refs stay valid;
	// sCacheListLock guards concurrent lookups via page->CacheRef()
	rw_lock_write_lock(&sCacheListLock);
	std::swap(fCacheRef, fromCache->fCacheRef);
	fCacheRef->cache = this;
	fromCache->fCacheRef->cache = fromCache;
	rw_lock_write_unlock(&sCacheListLock);

#if VM_CACHE_TRACING >= 2
	// emit per-page trace entries for the bulk move
	for (VMCachePagesTree::Iterator it = pages.GetIterator();
			vm_page* page = it.Next();) {
		T2(RemovePage(fromCache, page));
		T2(InsertPage(this, page, page->cache_offset << PAGE_SHIFT));
	}
#endif
}
/*!	Waits until any of the given \a events happen for \a page.  The waiter
	record lives on this thread's stack and is linked into
	fPageEventWaiters; _NotifyPageEvents() unlinks it before unblocking us.
	The cache must be locked; it is unlocked while waiting and re-locked
	afterwards only if \a relock is \c true.
*/
void
VMCache::WaitForPageEvents(vm_page* page, uint32 events, bool relock)
{
	PageEventWaiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = fPageEventWaiters;
	waiter.page = page;
	waiter.events = events;

	fPageEventWaiters = &waiter;

	// prepare to block before dropping the cache lock to avoid a lost wakeup
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_OTHER_OBJECT, page);

	Unlock();
	thread_block();

	if (relock)
		Lock();
}
/*!	Makes this cache the source of the \a consumer cache and adds the
	consumer to this cache's consumer list, acquiring a reference and a store
	reference on behalf of the consumer.  Both caches must be locked and the
	consumer must not yet have a source.
*/
void
VMCache::AddConsumer(VMCache* consumer)
{
	TRACE(("add consumer vm cache %p to cache %p\n", consumer, this));
	T(AddConsumer(this, consumer));
	AssertLocked();
	consumer->AssertLocked();
	ASSERT(consumer->source == NULL);

	consumer->source = this;
	consumers.Add(consumer);

	// the consumer holds a reference on us and on our store
	AcquireRefLocked();
	AcquireStoreRef();
}
/*!	Adds \a area to this cache's area list and acquires a store reference for
	it.  The cache must be locked.
*/
void
VMCache::InsertAreaLocked(VMArea* area)
{
	TRACE(("VMCache::InsertAreaLocked(cache %p, area %p)\n", this, area));
	T(InsertArea(this, area));
	AssertLocked();

	areas.Insert(area, false);

	AcquireStoreRef();
}


/*!	Removes \a area from this cache.  The store reference is released before
	taking the cache lock, since ReleaseStoreRef() may need to lock the
	cache itself (see also the locking in _RemoveConsumer()).
*/
void
VMCache::RemoveArea(VMArea* area)
{
	TRACE(("VMCache::RemoveArea(cache %p, area %p)\n", this, area));
	T(RemoveArea(this, area));

	// release the store ref without holding the cache lock
	ReleaseStoreRef();

	AutoLocker<VMCache> locker(this);
	areas.Remove(area);
}


/*!	Transfers all areas of \a fromCache to this cache, rebinding each area's
	cache pointer and moving one cache reference per area from \a fromCache
	to this cache.  This cache must have no areas yet; both caches must be
	locked.
*/
void
VMCache::TakeAreasFrom(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas.IsEmpty());

	areas.TakeFrom(&fromCache->areas);
	for (VMArea* area = areas.First(); area != NULL; area = areas.GetNext(area)) {
		area->cache = this;
		// each area carried a reference on its cache
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}
/*!	Counts the areas mapping this cache with write permission
	(B_WRITE_AREA or B_KERNEL_WRITE_AREA), excluding \a ignoreArea.
	\return The number of writable areas (other than \a ignoreArea).
*/
uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 writableCount = 0;

	VMArea* current = areas.First();
	while (current != NULL) {
		const bool isWritable = (current->protection
			& (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0;
		if (current != ignoreArea && isWritable)
			writableCount++;
		current = areas.GetNext(current);
	}

	return writableCount;
}
/*!	Writes all modified pages of the cache back to its store.  A no-op for
	temporary caches, which have no permanent backing store.
	\return \c B_OK on success, or the error from
		vm_page_write_modified_pages().
*/
status_t
VMCache::WriteModified()
{
	TRACE(("VMCache::WriteModified(cache = %p)\n", this));

	if (temporary)
		return B_OK;

	Lock();
	status_t status = vm_page_write_modified_pages(this);
	Unlock();

	return status;
}
/*!	Ensures that at least \a commitment bytes are committed for the cache,
	committing more via Commit() when the current commitment is smaller.
	The commitment is never reduced here.
	\param priority VM priority passed through to Commit().
	\return \c B_OK, or the error from Commit().
*/
status_t
VMCache::SetMinimalCommitment(off_t commitment, int priority)
{
	TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %" B_PRIdOFF
		")\n", this, commitment));
	T(SetMinimalCommitment(this, commitment));

	status_t status = B_OK;

	// If we don't have enough committed space to cover through to the new end
	// of the cache...
	if (Commitment() < commitment) {
#if KDEBUG
		// the requested commitment must not exceed the cache's size
		const off_t size = PAGE_ALIGN(virtual_end - virtual_base);
		ASSERT_PRINT(commitment <= size, "cache %p, commitment %" B_PRIdOFF ", size %" B_PRIdOFF,
			this, commitment, size);
#endif

		// try to commit more memory
		status = Commit(commitment, priority);
	}

	return status;
}
/*!	Frees pages starting at \a it, stopping at \a *toPage (exclusive, in
	page-offset units) if given.  Busy pages not marked busy_io are waited
	for; busy_io pages are removed and parked on fRemovedBusyPages for
	FreeRemovedPage().  \a *freedPages, if given, is incremented per page.
	\return \c true if the iteration must be restarted because the cache lock
		was dropped while waiting for a busy page, \c false when done.
*/
bool
VMCache::_FreePageRange(VMCachePagesTree::Iterator it,
	page_num_t* toPage = NULL, page_num_t* freedPages = NULL)
{
	for (vm_page* page = it.Next();
		page != NULL && (toPage == NULL || page->cache_offset < *toPage);
		page = it.Next()) {

		if (page->busy) {
			if (!page->busy_io) {
				// wait for the page to become unbusy; this drops the cache
				// lock, so the caller must restart the iteration
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);
				return true;
			}

			// in I/O -- take it out of busy_io and remove it below
			page->busy_io = false;
		}

		// remove the page and put it into the free queue
		ASSERT(page->WiredCount() == 0);
		DEBUG_PAGE_ACCESS_START(page);
		vm_remove_all_page_mappings(page);
		RemovePage(page);

		if (page->busy) {
			// the page is still busy (was busy_io); it will be freed via
			// FreeRemovedPage() once the I/O has completed
			fRemovedBusyPages.Add(page);
			DEBUG_PAGE_ACCESS_END(page);
		} else {
			vm_page_free(this, page);
		}

		if (freedPages != NULL)
			(*freedPages)++;
	}

	return false;
}
/*!	Resizes the cache to \a newSize, freeing all pages beyond the new end,
	zeroing the tail of a now-partial last page, and (for priority >= 0)
	adjusting the commitment accordingly.  The cache must be locked.
	\return \c B_OK, or the error from Commit().
*/
status_t
VMCache::Resize(off_t newSize, int priority)
{
	TRACE(("VMCache::Resize(cache %p, newSize %" B_PRIdOFF ") old size %"
		B_PRIdOFF "\n", this, newSize, this->virtual_end));
	T(Resize(this, newSize));
	AssertLocked();

	page_num_t oldPageCount = (page_num_t)((virtual_end + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);
	page_num_t newPageCount = (page_num_t)((newSize + B_PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	if (newPageCount < oldPageCount) {
		// we need to remove all pages in the cache outside of the new virtual
		// size; _FreePageRange() returns true when it had to drop the lock,
		// in which case the iteration is restarted
		while (_FreePageRange(pages.GetIterator(newPageCount, true, true)))
			;
	}

	if (newSize < virtual_end && newPageCount > 0) {
		// zero out the tail of the last remaining (now partial) page, so no
		// stale data is exposed if the cache grows again later
		uint32 partialBytes = newSize % B_PAGE_SIZE;
		if (partialBytes != 0) {
			vm_page* page = LookupPage(newSize - partialBytes);
			if (page != NULL) {
				vm_memset_physical(page->physical_page_number * B_PAGE_SIZE
					+ partialBytes, 0, B_PAGE_SIZE - partialBytes);
			}
		}
	}

	if (priority >= 0) {
		status_t status = Commit(PAGE_ALIGN(newSize - virtual_base), priority);
		if (status != B_OK)
			return status;
	}

	virtual_end = newSize;
	return B_OK;
}
/*!	Moves the cache's base to \a newBase, freeing all pages below the new
	base and (for priority >= 0) adjusting the commitment accordingly.
	The cache must be locked.
	\return \c B_OK, or the error from Commit().
*/
status_t
VMCache::Rebase(off_t newBase, int priority)
{
	TRACE(("VMCache::Rebase(cache %p, newBase %lld) old base %lld\n",
		this, newBase, this->virtual_base));
	T(Rebase(this, newBase));
	AssertLocked();

	page_num_t basePage = (page_num_t)(newBase >> PAGE_SHIFT);

	if (newBase > virtual_base) {
		// free all pages below the new base; restart whenever the lock had
		// to be dropped while waiting on a busy page
		while (_FreePageRange(pages.GetIterator(), &basePage))
			;
	}

	if (priority >= 0) {
		status_t status = Commit(PAGE_ALIGN(virtual_end - newBase), priority);
		if (status != B_OK)
			return status;
	}

	virtual_base = newBase;
	return B_OK;
}
/*!	Moves the pages in the range [offset, offset + size) of \a source into
	this cache, shifting their offsets by (newOffset - offset).
	Both caches must be locked (implied by MovePage()'s requirements).
	\return \c B_OK.
*/
status_t
VMCache::Adopt(VMCache* source, off_t offset, off_t size, off_t newOffset)
{
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	off_t offsetChange = newOffset - offset;

	VMCachePagesTree::Iterator it = source->pages.GetIterator(startPage, true,
		true);
	for (vm_page* page = it.Next();
				page != NULL && page->cache_offset < endPage;
				page = it.Next()) {
		MovePage(page, (page->cache_offset << PAGE_SHIFT) + offsetChange);
	}

	return B_OK;
}


/*!	Discards (frees) the cache's pages in the range [offset, offset + size).
	\return The number of bytes discarded (whole pages freed * B_PAGE_SIZE).
*/
ssize_t
VMCache::Discard(off_t offset, off_t size)
{
	page_num_t discarded = 0;
	page_num_t startPage = offset >> PAGE_SHIFT;
	page_num_t endPage = (offset + size + B_PAGE_SIZE - 1) >> PAGE_SHIFT;
	// restart whenever _FreePageRange() had to drop the cache lock
	while (_FreePageRange(pages.GetIterator(startPage, true, true), &endPage, &discarded))
		;

	return (discarded * B_PAGE_SIZE);
}
/*!	Writes back all modified pages and removes all pages from the cache.
	Repeats until the cache is empty, since waiting for busy pages drops the
	cache lock.  The cache must be locked.
	\return \c B_OK on success, \c B_BUSY if a page is still mapped, or the
		error from vm_page_write_modified_pages().
*/
status_t
VMCache::FlushAndRemoveAllPages()
{
	ASSERT_LOCKED_MUTEX(&fLock);

	while (page_count > 0) {
		// write back modified pages
		status_t status = vm_page_write_modified_pages(this);
		if (status != B_OK)
			return status;

		// remove pages
		for (VMCachePagesTree::Iterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (page->busy) {
				// wait for page to become unbusy; this unlocks the cache, so
				// the iterator is invalidated and must be reset
				WaitForPageEvents(page, PAGE_EVENT_NOT_BUSY, true);

				// restart from the beginning
				it = pages.GetIterator();
				continue;
			}

			// skip modified pages -- they will be written back in the next
			// outer-loop iteration
			if (page->State() == PAGE_STATE_MODIFIED)
				continue;

			// We can't remove mapped pages.
			if (page->IsMapped())
				return B_BUSY;

			DEBUG_PAGE_ACCESS_START(page);
			RemovePage(page);
			vm_page_free(this, page);
				// Note: When iterating through a IteratableSplayTree
				// removing the current node is safe.
		}
	}

	return B_OK;
}
// #pragma mark - default store implementations (overridden by subclasses)


/*!	Default: the base cache has no commitment.
*/
off_t
VMCache::Commitment() const
{
	return 0;
}


/*!	Default: overcommitting is not supported.
*/
bool
VMCache::CanOvercommit()
{
	return false;
}


/*!	Default: committing is not supported; subclasses that can commit memory
	override this.  Reaching this implementation is a programming error.
*/
status_t
VMCache::Commit(off_t size, int priority)
{
	ASSERT_UNREACHABLE();
	return B_NOT_SUPPORTED;
}


/*!	Default: transferring commitment is not supported; reaching this
	implementation is a programming error.
*/
void
VMCache::TakeCommitmentFrom(VMCache* from, off_t commitment)
{
	ASSERT_UNREACHABLE();
}


/*!	Default: the (nonexistent) store holds no pages.
*/
bool
VMCache::StoreHasPage(off_t offset)
{
	return false;
}


/*!	Default: reading from the store is not supported.
*/
status_t
VMCache::Read(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


/*!	Default: writing to the store is not supported.
*/
status_t
VMCache::Write(off_t offset, const generic_io_vec *vecs, size_t count,
	uint32 flags, generic_size_t *_numBytes)
{
	return B_ERROR;
}


/*!	Default asynchronous write: performs a synchronous Write() and invokes
	the \a callback immediately with the result.
*/
status_t
VMCache::WriteAsync(off_t offset, const generic_io_vec* vecs, size_t count,
	generic_size_t numBytes, uint32 flags, AsyncIOCallback* callback)
{
	// Not supported, fall back to the synchronous hook.
	generic_size_t transferred = numBytes;
	status_t error = Write(offset, vecs, count, flags, &transferred);

	if (callback != NULL)
		callback->IOFinished(error, transferred != numBytes, transferred);

	return error;
}


/*!	Default: pages cannot be written back individually.
*/
bool
VMCache::CanWritePage(off_t offset)
{
	return false;
}


/*!	Default fault handler: the cache cannot satisfy faults.
*/
status_t
VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
{
	return B_BAD_ADDRESS;
}
/*!	Merges the pages of \a source into this (consumer) cache: every source
	page within this cache's [virtual_base, virtual_end) range that this
	cache does not already have is moved over.  Pages outside the range or
	already shadowed here are left in \a source.  Both caches must be locked.
*/
void
VMCache::Merge(VMCache* source)
{
	// only pages inside this cache's virtual range are of interest
	const page_num_t firstOffset = ROUNDDOWN(virtual_base, B_PAGE_SIZE) >> PAGE_SHIFT,
		endOffset = (page_num_t)((virtual_end + B_PAGE_SIZE - 1) >> PAGE_SHIFT);

	VMCachePagesTree::Iterator it = source->pages.GetIterator();
	while (vm_page* page = it.Next()) {
		if (page->cache_offset < firstOffset || page->cache_offset >= endOffset)
			continue;

		// Note: Removing the current node while iterating through a
		// IteratableSplayTree is safe.
		vm_page* consumerPage = LookupPage(
			(off_t)page->cache_offset << PAGE_SHIFT);
		if (consumerPage == NULL) {
			// the page is not yet in the consumer cache - move it upwards
			MovePage(page);
		}
	}
}
/*!	Default: acquiring a store reference from an unreferenced state is not
	supported.
*/
status_t
VMCache::AcquireUnreferencedStoreRef()
{
	return B_ERROR;
}


/*!	Default: no store reference accounting.
*/
void
VMCache::AcquireStoreRef()
{
}


/*!	Default: no store reference accounting.
*/
void
VMCache::ReleaseStoreRef()
{
}


/*!	Kernel debugger variant of StoreHasPage(); the default simply forwards
	to StoreHasPage().
*/
bool
VMCache::DebugStoreHasPage(off_t offset)
{
	return StoreHasPage(offset);
}


/*!	Kernel debugger variant of LookupPage(): looks up the page without any
	locking or KDEBUG consistency checks.
*/
vm_page*
VMCache::DebugLookupPage(off_t offset)
{
	return pages.Lookup((page_num_t)(offset >> PAGE_SHIFT));
}
/*!	Dumps the cache's state to the kernel debugger output: basic fields,
	areas, consumers and -- when \a showPages is \c true -- every page.
	Intended to be called from KDL only.
*/
void
VMCache::Dump(bool showPages) const
{
	kprintf("CACHE %p:\n", this);
	kprintf("  ref_count:    %" B_PRId32 "\n", RefCount());
	kprintf("  source:       %p\n", source);
	kprintf("  type:         %s\n", vm_cache_type_to_string(type));
	kprintf("  virtual_base: 0x%" B_PRIx64 "\n", virtual_base);
	kprintf("  virtual_end:  0x%" B_PRIx64 "\n", virtual_end);
	kprintf("  temporary:    %" B_PRIu32 "\n", uint32(temporary));
	kprintf("  lock:         %p\n", &fLock);
#if KDEBUG
	kprintf("  lock.holder:  %" B_PRId32 "\n", fLock.holder);
#endif
	kprintf("  areas:\n");

	for (VMArea* area = areas.First(); area != NULL; area = areas.GetNext(area)) {
		kprintf("    area 0x%" B_PRIx32 ", %s\n", area->id, area->name);
		kprintf("\tbase_addr:  0x%lx, size: 0x%lx\n", area->Base(),
			area->Size());
		kprintf("\tprotection: 0x%" B_PRIx32 "\n", area->protection);
		kprintf("\towner:      0x%" B_PRIx32 "\n", area->address_space->ID());
	}

	kprintf("  consumers:\n");
	for (ConsumerList::ConstIterator it = consumers.GetIterator();
			VMCache* consumer = it.Next();) {
		kprintf("\t%p\n", consumer);
	}

	kprintf("  pages:\n");
	if (showPages) {
		for (VMCachePagesTree::ConstIterator it = pages.GetIterator();
				vm_page* page = it.Next();) {
			if (!vm_page_is_dummy(page)) {
				kprintf("\t%p ppn %#" B_PRIxPHYSADDR " offset %#" B_PRIxPHYSADDR
					" state %u (%s) wired_count %u\n", page,
					page->physical_page_number, page->cache_offset,
					page->State(), page_state_to_string(page->State()),
					page->WiredCount());
			} else {
				kprintf("\t%p DUMMY PAGE state %u (%s)\n",
					page, page->State(), page_state_to_string(page->State()));
			}
		}
	} else
		kprintf("\t%" B_PRIu32 " in cache\n", page_count);
}
/*!	Wakes up all threads waiting in WaitForPageEvents() for any of the given
	\a events on \a page, unlinking their waiter records from the list.
	The cache must be locked.
*/
void
VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
{
	PageEventWaiter** it = &fPageEventWaiters;
	while (PageEventWaiter* waiter = *it) {
		if (waiter->page == page && (waiter->events & events) != 0) {
			// remove from the list and unblock the waiter; the waiter record
			// lives on the blocked thread's stack
			*it = waiter->next;
			thread_unblock(waiter->thread, B_OK);
		} else
			it = &waiter->next;
	}
}
/*!	Merges this cache into its single consumer: the consumer takes over the
	pages (via its Merge() implementation) and this cache's source becomes
	the consumer's source.  Both this cache and the consumer must be locked;
	_IsMergeable() must hold.  Releases the consumer's reference on us.
*/
void
VMCache::_MergeWithOnlyConsumer()
{
	VMCache* consumer = consumers.RemoveHead();

	TRACE(("merge vm cache %p (ref == %" B_PRId32 ") with vm cache %p\n",
		this, this->fRefCount, consumer));

	T(Merge(this, consumer));

	// merge the cache
	consumer->Merge(this);

	// The remaining consumer has got a new source.
	if (source != NULL) {
		VMCache* newSource = source;

		newSource->Lock();

		newSource->consumers.Remove(this);
		newSource->consumers.Add(consumer);
		consumer->source = newSource;
		source = NULL;

		newSource->Unlock();
	} else
		consumer->source = NULL;

	// Release the reference the cache's consumer owned. The consumer takes
	// over the cache's ref to its source (if any) instead.
	ReleaseRefLocked();
}
/*!	Removes the \a consumer from this cache and releases the reference and
	store reference the consumer held on us (see AddConsumer()).
	The caller must hold the consumer's lock but NOT this cache's lock.
*/
void
VMCache::_RemoveConsumer(VMCache* consumer)
{
	TRACE(("remove consumer vm cache %p from cache %p\n", consumer, this));
	T(RemoveConsumer(this, consumer));
	consumer->AssertLocked();

	// take this cache's lock only for the list manipulation
	Lock();
	consumers.Remove(consumer);
	consumer->source = NULL;
	Unlock();

	// release the reference the consumer owned
	ReleaseStoreRef();
	ReleaseRef();
}
// #pragma mark - VMCacheFactory
	// TODO: Move to own source file!


/*!	Creates an anonymous memory cache.  When swap support is compiled in and
	\a swappable is \c true, a VMAnonymousCache is created; otherwise a
	VMAnonymousNoSwapCache.  VIP allocation flags are used for high-priority
	requests so the allocation cannot deadlock on low memory.
	\param _cache On success, set to the new cache.
	\return \c B_OK, \c B_NO_MEMORY, or the error from the cache's Init().
*/
status_t
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
	int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
	int priority)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

#if ENABLE_SWAP_SUPPORT
	if (swappable) {
		VMAnonymousCache* cache
			= new(gAnonymousCacheObjectCache, allocationFlags) VMAnonymousCache;
		if (cache == NULL)
			return B_NO_MEMORY;

		status_t error = cache->Init(canOvercommit, numPrecommittedPages,
			numGuardPages, allocationFlags);
		if (error != B_OK) {
			cache->Delete();
			return error;
		}

		T(Create(cache));

		_cache = cache;
		return B_OK;
	}
#endif

	VMAnonymousNoSwapCache* cache
		= new(gAnonymousNoSwapCacheObjectCache, allocationFlags)
			VMAnonymousNoSwapCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(canOvercommit, numPrecommittedPages,
		numGuardPages, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*!	Creates a cache backed by the given \a vnode.
	\param _cache On success, set to the new cache.
	\return \c B_OK, \c B_NO_MEMORY, or the error from Init().
*/
status_t
VMCacheFactory::CreateVnodeCache(VMCache*& _cache, struct vnode* vnode)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;

	VMVnodeCache* cache
		= new(gVnodeCacheObjectCache, allocationFlags) VMVnodeCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(vnode, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*!	Creates a cache for device memory starting at \a baseAddress.
	\param _cache On success, set to the new cache.
	\return \c B_OK, \c B_NO_MEMORY, or the error from Init().
*/
status_t
VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
{
	const uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;

	VMDeviceCache* cache
		= new(gDeviceCacheObjectCache, allocationFlags) VMDeviceCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(baseAddress, allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}


/*!	Creates a cache without backing pages or store ("null" cache).
	\param priority VIP priorities get VIP allocation flags, as above.
	\param _cache On success, set to the new cache.
	\return \c B_OK, \c B_NO_MEMORY, or the error from Init().
*/
status_t
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
{
	uint32 allocationFlags = HEAP_DONT_WAIT_FOR_MEMORY
		| HEAP_DONT_LOCK_KERNEL_SPACE;
	if (priority >= VM_PRIORITY_VIP)
		allocationFlags |= HEAP_PRIORITY_VIP;

	VMNullCache* cache
		= new(gNullCacheObjectCache, allocationFlags) VMNullCache;
	if (cache == NULL)
		return B_NO_MEMORY;

	status_t error = cache->Init(allocationFlags);
	if (error != B_OK) {
		cache->Delete();
		return error;
	}

	T(Create(cache));

	_cache = cache;
	return B_OK;
}