// root/src/system/kernel/arch/ppc/paging/PPCVMTranslationMap.cpp
/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/PPCVMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/PPCPagingStructures.h"


//#define TRACE_PPC_VM_TRANSLATION_MAP
#ifdef TRACE_PPC_VM_TRANSLATION_MAP
#       define TRACE(x...) dprintf(x)
#else
#       define TRACE(x...) ;
#endif


// Construct an uninitialized translation map; Init() must be called before
// the map is used. No pages have been invalidated yet.
PPCVMTranslationMap::PPCVMTranslationMap()
	:
	//X86:fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


// Nothing to tear down here; derived classes own the actual paging
// structures and release them in their own destructors.
PPCVMTranslationMap::~PPCVMTranslationMap()
{
}


/*!	Performs the base-class part of the map initialization.
	\param kernel \c true, if this map translates the kernel address space.
	\return \c B_OK always.
*/
status_t
PPCVMTranslationMap::Init(bool kernel)
{
	// Remember which address space (kernel vs. team) this map serves;
	// Flush() uses this to decide the TLB shootdown strategy.
	fIsKernelMap = kernel;

	return B_OK;
}


/*!     Acquires the map's recursive lock, and resets the invalidate pages counter
        in case it's the first locking recursion.
*/
/*!	Acquires the map's recursive lock, and resets the invalidate pages counter
	in case it's the first locking recursion.
*/
bool
PPCVMTranslationMap::Lock()
{
	TRACE("%p->PPCVMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);

	// On the outermost acquisition start with a clean slate of pages that
	// still need their TLB entries invalidated.
	const bool outermostLock = recursive_lock_get_recursion(&fLock) == 1;
	if (outermostLock) {
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!     Unlocks the map, and, if we are actually losing the recursive lock,
        flush all pending changes of this map (ie. flush TLB caches as
        needed).
*/
/*!	Unlocks the map, and, if we are actually losing the recursive lock,
	flush all pending changes of this map (ie. flush TLB caches as
	needed).
*/
void
PPCVMTranslationMap::Unlock()
{
	TRACE("%p->PPCVMTranslationMap::Unlock()\n", this);

	// Only the outermost unlock triggers the flush of the accumulated
	// invalidations — inner recursions just drop one lock level.
	const bool outermostUnlock = recursive_lock_get_recursion(&fLock) == 1;
	if (outermostUnlock)
		Flush();

	recursive_lock_unlock(&fLock);
}


// Returns fMapCount. NOTE(review): despite the name suggesting a byte size,
// this returns the raw map count member — presumably the number of mapped
// pages; verify against callers and the declaration of fMapCount.
addr_t
PPCVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


/*!	Flushes all pending TLB invalidations accumulated in this map.

	Does nothing if no pages were invalidated since the last flush. The
	calling thread is pinned to its current CPU for the duration of the
	shootdown (presumably so the initiating CPU stays fixed while the ICI
	messages are in flight — TODO confirm).

	Strategy: if more pages accumulated than fit the invalidation list
	(PAGE_INVALIDATE_CACHE_SIZE — assumed to be the capacity of
	fInvalidPages, verify in the header), all pages are invalidated at once;
	otherwise only the pages recorded in fInvalidPages. Kernel maps
	broadcast the invalidation to all CPUs; user maps target only the CPUs
	in PagingStructures()->active_on_cpus.
*/
void
PPCVMTranslationMap::Flush()
{
	// Nothing pending — avoid the pinning and ICI overhead entirely.
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			// Kernel address space is visible on every CPU: synchronous
			// broadcast so all TLBs are flushed before we return.
			smp_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			// User map: only CPUs currently running this address space
			// can have stale entries.
			InvalidateUserTLB(PagingStructures()->active_on_cpus, 0);
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			// Pass the recorded page list to the other CPUs; SYNC flag
			// means fInvalidPages stays valid until they are done.
			smp_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				0, (addr_t)fInvalidPages, fInvalidPagesCount, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			InvalidateTLBList(PagingStructures()->active_on_cpus, 0,
				fInvalidPages, fInvalidPagesCount);
		}
	}
	// All shootdowns issued — reset the accumulator for the next batch.
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}