#ifndef KERNEL_SCHEDULER_CPU_H
#define KERNEL_SCHEDULER_CPU_H
#include <OS.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <util/Heap.h>
#include <util/MinMaxHeap.h>
#include <cpufreq.h>
#include "RunQueue.h"
#include "scheduler_common.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
namespace Scheduler {
class DebugDumper;
struct ThreadData;
class ThreadProcessing;
class CPUEntry;
class CoreEntry;
class PackageEntry;
// Run queue for scheduler threads, indexed by thread priority
// (0 .. THREAD_MAX_SET_PRIORITY).
class ThreadRunQueue : public RunQueue<ThreadData, THREAD_MAX_SET_PRIORITY> {
public:
// Kernel-debugger helper: print the queue's contents.
void Dump() const;
};
// Per-logical-CPU scheduler state: this CPU's own run queue, its measured
// load, and the locks guarding scheduling and mode switches. One entry per
// CPU lives in gCPUEntries; entries are heap-linked so a core can keep its
// CPUs in a CPUPriorityHeap.
class CPUEntry : public HeapLinkImpl<CPUEntry, int32> {
public:
CPUEntry();
// Bind this entry to CPU `id` belonging to `core`.
void Init(int32 id, CoreEntry* core);
inline int32 ID() const { return fCPUNumber; }
inline CoreEntry* Core() const { return fCore; }
void Start();
void Stop();
// Reader/writer use of fSchedulerModeLock: Enter/Exit take the read
// side (normal scheduling), Lock/Unlock take the write side
// (exclusive, e.g. while the scheduler mode changes).
inline void EnterScheduler();
inline void ExitScheduler();
inline void LockScheduler();
inline void UnlockScheduler();
// Guard for this CPU's private run queue (fQueueLock).
inline void LockRunQueue();
inline void UnlockRunQueue();
void PushFront(ThreadData* thread,
int32 priority);
void PushBack(ThreadData* thread,
int32 priority);
void Remove(ThreadData* thread);
ThreadData* PeekThread() const;
ThreadData* PeekIdleThread() const;
// Update this CPU's key in its core's CPU priority heap.
void UpdatePriority(int32 priority);
inline int32 GetLoad() const { return fLoad; }
void ComputeLoad();
ThreadData* ChooseNextThread(ThreadData* oldThread,
bool putAtBack);
void TrackActivity(ThreadData* oldThreadData,
ThreadData* nextThreadData);
void StartQuantumTimer(ThreadData* thread,
bool wasPreempted);
// Lookup by CPU index into the global gCPUEntries array.
static inline CPUEntry* GetCPU(int32 cpu);
private:
// Request a CPU performance level suited to threadData's load
// (presumably via cpufreq — see <cpufreq.h> include; confirm in .cpp).
void _RequestPerformanceLevel(
ThreadData* threadData);
static int32 _RescheduleEvent(timer* );
static int32 _UpdateLoadEvent(timer* );
int32 fCPUNumber;
CoreEntry* fCore;
// Read-locked while scheduling, write-locked for exclusive access.
rw_spinlock fSchedulerModeLock;
// CPU-local queue, protected by fQueueLock.
ThreadRunQueue fRunQueue;
spinlock fQueueLock;
int32 fLoad;
bigtime_t fMeasureActiveTime;
bigtime_t fMeasureTime;
// Whether the periodic load-update timer event is armed.
bool fUpdateLoadEvent;
friend class DebugDumper;
} CACHE_LINE_ALIGN;
// Heap of a core's CPUEntries keyed by int32 (the priority of the thread
// each CPU currently runs — see CPUEntry::UpdatePriority).
class CPUPriorityHeap : public Heap<CPUEntry, int32> {
public:
CPUPriorityHeap() { }
CPUPriorityHeap(int32 cpuCount);
// Kernel-debugger helper: print the heap's contents.
void Dump();
};
// Per-core scheduler state: the core's shared run queue, the heap of its
// CPUs, active-time accounting, and aggregated load bookkeeping. Entries
// live in gCoreEntries; the min-max-heap link lets cores sit in
// gCoreLoadHeap/gCoreHighLoadHeap, the list link in a package's idle list.
class CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
public DoublyLinkedListLinkImpl<CoreEntry> {
public:
CoreEntry();
// Bind this entry to core `id` belonging to `package`.
void Init(int32 id, PackageEntry* package);
inline int32 ID() const { return fCoreID; }
inline PackageEntry* Package() const { return fPackage; }
inline int32 CPUCount() const
{ return fCPUCount; }
inline const CPUSet& CPUMask() const
{ return fCPUSet; }
// Guard for fCPUHeap (fCPULock).
inline void LockCPUHeap();
inline void UnlockCPUHeap();
inline CPUPriorityHeap* CPUHeap();
inline int32 ThreadCount() const;
// Guard for the core's shared run queue (fQueueLock).
inline void LockRunQueue();
inline void UnlockRunQueue();
void PushFront(ThreadData* thread,
int32 priority);
void PushBack(ThreadData* thread,
int32 priority);
void Remove(ThreadData* thread);
ThreadData* PeekThread() const;
inline bigtime_t GetActiveTime() const;
inline void IncreaseActiveTime(
bigtime_t activeTime);
inline int32 GetLoad() const;
// Epoch counter bumped by _UpdateLoad(); lets AddLoad/RemoveLoad tell
// whether a thread's load was already folded into fLoad this period.
inline uint32 LoadMeasurementEpoch() const
{ return fLoadMeasurementEpoch; }
inline void AddLoad(int32 load, uint32 epoch,
bool updateLoad);
inline uint32 RemoveLoad(int32 load, bool force);
inline void ChangeLoad(int32 delta);
// Idle-CPU tracking; when the whole core (un)idles, the package is told.
inline void CPUGoesIdle(CPUEntry* cpu);
inline void CPUWakesUp(CPUEntry* cpu);
void AddCPU(CPUEntry* cpu);
void RemoveCPU(CPUEntry* cpu,
ThreadProcessing&
threadPostProcessing);
// Map a CPU index to its core via gCPUEntries.
static inline CoreEntry* GetCore(int32 cpu);
private:
void _UpdateLoad(bool forceUpdate = false);
static void _UnassignThread(Thread* thread,
void* core);
int32 fCoreID;
PackageEntry* fPackage;
int32 fCPUCount;
CPUSet fCPUSet;
int32 fIdleCPUCount;
CPUPriorityHeap fCPUHeap;
spinlock fCPULock;
int32 fThreadCount;
ThreadRunQueue fRunQueue;
spinlock fQueueLock;
// Total active time; fActiveTimeLock is a seqlock so readers retry
// instead of blocking (see GetActiveTime()).
bigtime_t fActiveTime;
mutable seqlock fActiveTimeLock;
// fLoad: load for the current measurement epoch; fCurrentLoad: the
// instantaneous sum. Both updated atomically under fLoadLock (read).
int32 fLoad;
int32 fCurrentLoad;
uint32 fLoadMeasurementEpoch;
bool fHighLoad;
bigtime_t fLastLoadUpdate;
rw_spinlock fLoadLock;
friend class DebugDumper;
} CACHE_LINE_ALIGN;
// Min-max heap of CoreEntries keyed by int32 load, backing the global
// gCoreLoadHeap / gCoreHighLoadHeap declared below.
class CoreLoadHeap : public MinMaxHeap<CoreEntry, int32> {
public:
CoreLoadHeap() { }
CoreLoadHeap(int32 coreCount);
// Kernel-debugger helper: print the heap's contents.
void Dump();
};
// Per-package (physical CPU) scheduler state: tracks which of the
// package's cores are currently idle. Entries live in gPackageEntries;
// fully idle packages are linked into gIdlePackageList.
class PackageEntry : public DoublyLinkedListLinkImpl<PackageEntry> {
public:
PackageEntry();
void Init(int32 id);
// Called by CoreEntry when a core becomes fully idle / busy again.
inline void CoreGoesIdle(CoreEntry* core);
inline void CoreWakesUp(CoreEntry* core);
// Returns the index-th most recently idled core (NULL if fewer exist).
inline CoreEntry* GetIdleCore(int32 index = 0) const;
void AddIdleCore(CoreEntry* core);
void RemoveIdleCore(CoreEntry* core);
// Global scans over gPackageEntries by idle-core count.
static inline PackageEntry* GetMostIdlePackage();
static inline PackageEntry* GetLeastIdlePackage();
private:
int32 fPackageID;
// Idle cores, most recently idled last; guarded by fCoreLock.
DoublyLinkedList<CoreEntry> fIdleCores;
int32 fIdleCoreCount;
int32 fCoreCount;
rw_spinlock fCoreLock;
friend class DebugDumper;
} CACHE_LINE_ALIGN;
typedef DoublyLinkedList<PackageEntry> IdlePackageList;
// Global scheduler topology, set up at scheduler initialization.
extern CPUEntry* gCPUEntries;		// one entry per logical CPU
extern CoreEntry* gCoreEntries;		// one entry per core
extern CoreLoadHeap gCoreLoadHeap;	// cores below the high-load threshold
extern CoreLoadHeap gCoreHighLoadHeap;	// cores above it (see fHighLoad)
extern rw_spinlock gCoreHeapsLock;	// guards both core load heaps
extern int32 gCoreCount;
extern PackageEntry* gPackageEntries;	// one entry per package
extern IdlePackageList gIdlePackageList;	// packages with all cores idle
extern rw_spinlock gIdlePackageLock;	// guards gIdlePackageList
extern int32 gPackageCount;
// Take the scheduler-mode lock shared: many CPUs may schedule concurrently.
inline void
CPUEntry::EnterScheduler()
{
SCHEDULER_ENTER_FUNCTION();
acquire_read_spinlock(&fSchedulerModeLock);
}
// Release the shared scheduler-mode lock taken by EnterScheduler().
inline void
CPUEntry::ExitScheduler()
{
SCHEDULER_ENTER_FUNCTION();
release_read_spinlock(&fSchedulerModeLock);
}
// Take the scheduler-mode lock exclusively, blocking out all schedulers
// on this CPU (write side of the rw_spinlock).
inline void
CPUEntry::LockScheduler()
{
SCHEDULER_ENTER_FUNCTION();
acquire_write_spinlock(&fSchedulerModeLock);
}
// Release the exclusive scheduler-mode lock taken by LockScheduler().
inline void
CPUEntry::UnlockScheduler()
{
SCHEDULER_ENTER_FUNCTION();
release_write_spinlock(&fSchedulerModeLock);
}
// Acquire the spinlock guarding this CPU's private run queue.
inline void
CPUEntry::LockRunQueue()
{
SCHEDULER_ENTER_FUNCTION();
acquire_spinlock(&fQueueLock);
}
// Release the spinlock guarding this CPU's private run queue.
inline void
CPUEntry::UnlockRunQueue()
{
SCHEDULER_ENTER_FUNCTION();
release_spinlock(&fQueueLock);
}
// Return the entry for logical CPU `cpu`; no bounds checking, the index
// must be valid.
inline CPUEntry*
CPUEntry::GetCPU(int32 cpu)
{
SCHEDULER_ENTER_FUNCTION();
return &gCPUEntries[cpu];
}
// Acquire the spinlock guarding this core's CPU priority heap.
inline void
CoreEntry::LockCPUHeap()
{
SCHEDULER_ENTER_FUNCTION();
acquire_spinlock(&fCPULock);
}
// Release the spinlock guarding this core's CPU priority heap.
inline void
CoreEntry::UnlockCPUHeap()
{
SCHEDULER_ENTER_FUNCTION();
release_spinlock(&fCPULock);
}
// Expose the CPU priority heap; callers must hold fCPULock (via
// LockCPUHeap()) while using it.
inline CPUPriorityHeap*
CoreEntry::CPUHeap()
{
SCHEDULER_ENTER_FUNCTION();
return &fCPUHeap;
}
// Number of threads this core is responsible for: the queued count plus
// one per non-idle CPU (fCPUCount - fIdleCPUCount currently running).
inline int32
CoreEntry::ThreadCount() const
{
SCHEDULER_ENTER_FUNCTION();
return fThreadCount + fCPUCount - fIdleCPUCount;
}
// Acquire the spinlock guarding this core's shared run queue.
inline void
CoreEntry::LockRunQueue()
{
SCHEDULER_ENTER_FUNCTION();
acquire_spinlock(&fQueueLock);
}
// Release the spinlock guarding this core's shared run queue.
inline void
CoreEntry::UnlockRunQueue()
{
SCHEDULER_ENTER_FUNCTION();
release_spinlock(&fQueueLock);
}
// Add `activeTime` to the core's total, under the write side of the
// seqlock so concurrent GetActiveTime() readers retry instead of seeing
// a torn 64-bit value.
inline void
CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
{
SCHEDULER_ENTER_FUNCTION();
WriteSequentialLocker _(fActiveTimeLock);
fActiveTime += activeTime;
}
// Read the core's total active time via the seqlock retry protocol:
// re-read until no writer interleaved (release_read_seqlock() returns
// false when the sequence count changed during the read).
inline bigtime_t
CoreEntry::GetActiveTime() const
{
SCHEDULER_ENTER_FUNCTION();
bigtime_t activeTime;
uint32 count;
do {
count = acquire_read_seqlock(&fActiveTimeLock);
activeTime = fActiveTime;
} while (!release_read_seqlock(&fActiveTimeLock, count));
return activeTime;
}
// Average per-CPU load of this core, clamped to kMaxLoad.
inline int32
CoreEntry::GetLoad() const
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(fCPUCount > 0);
return std::min(fLoad / fCPUCount, kMaxLoad);
}
// Account `load` to this core. The read lock on fLoadLock excludes
// _UpdateLoad()'s writer while the atomics allow many concurrent adders.
// fLoad is only increased when the caller's `epoch` differs from the
// current one — i.e. the load was not already counted this epoch.
inline void
CoreEntry::AddLoad(int32 load, uint32 epoch, bool updateLoad)
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(gTrackCoreLoad);
ASSERT(load >= 0 && load <= kMaxLoad);
ReadSpinLocker locker(fLoadLock);
atomic_add(&fCurrentLoad, load);
if (fLoadMeasurementEpoch != epoch)
atomic_add(&fLoad, load);
locker.Unlock();
// Unlocked first: _UpdateLoad() takes fLoadLock for writing itself.
if (updateLoad)
_UpdateLoad(true);
}
// Remove `load` from the instantaneous count; with `force` also remove it
// from the epoch load and trigger an immediate recomputation. Returns the
// current measurement epoch so the caller can pass it back to AddLoad().
inline uint32
CoreEntry::RemoveLoad(int32 load, bool force)
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(gTrackCoreLoad);
ASSERT(load >= 0 && load <= kMaxLoad);
ReadSpinLocker locker(fLoadLock);
atomic_add(&fCurrentLoad, -load);
if (force) {
atomic_add(&fLoad, -load);
// Unlock before _UpdateLoad(), which write-locks fLoadLock itself.
locker.Unlock();
_UpdateLoad(true);
}
return fLoadMeasurementEpoch;
}
// Adjust both load counters by `delta` (may be negative) and let
// _UpdateLoad() refresh the core's position in the load heaps.
inline void
CoreEntry::ChangeLoad(int32 delta)
{
SCHEDULER_ENTER_FUNCTION();
ASSERT(gTrackCoreLoad);
ASSERT(delta >= -kMaxLoad && delta <= kMaxLoad);
if (delta != 0) {
ReadSpinLocker locker(fLoadLock);
atomic_add(&fCurrentLoad, delta);
atomic_add(&fLoad, delta);
}
_UpdateLoad();
}
// Record that `core` has become fully idle. When it was the package's last
// active core, the whole package is added to the global idle-package list.
//
// Fix: the nested RAII locker used to be named `_` like the outer one,
// shadowing it (triggers -Wshadow and obscures which lock is held where);
// both lockers now carry descriptive names. Behavior is unchanged.
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker coreLocker(fCoreLock);

	ASSERT(fIdleCoreCount >= 0);
	ASSERT(fIdleCoreCount < fCoreCount);

	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fIdleCoreCount == fCoreCount) {
		// This was the last active core. Also note whole package as idle.
		WriteSpinLocker packageLocker(gIdlePackageLock);
		gIdlePackageList.Add(this);
	}
}
// Record that `core` is no longer fully idle. When the package previously
// had all cores idle (fIdleCoreCount + 1 == fCoreCount after decrement),
// it is removed from the global idle-package list.
//
// Fix: the nested RAII locker used to be named `_` like the outer one,
// shadowing it (triggers -Wshadow and obscures which lock is held where);
// both lockers now carry descriptive names. Behavior is unchanged.
inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker coreLocker(fCoreLock);

	ASSERT(fIdleCoreCount > 0);
	ASSERT(fIdleCoreCount <= fCoreCount);

	fIdleCoreCount--;
	fIdleCores.Remove(core);

	if (fIdleCoreCount + 1 == fCoreCount) {
		// The package was fully idle before this wake-up.
		WriteSpinLocker packageLocker(gIdlePackageLock);
		gIdlePackageList.Remove(this);
	}
}
// One of this core's CPUs has gone idle. When it was the last busy CPU,
// the whole core is idle now and the package is notified.
inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
	// With a single core there is no package-level idleness to track.
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount < fCPUCount);

	fIdleCPUCount++;
	if (fIdleCPUCount == fCPUCount)
		fPackage->CoreGoesIdle(this);
}
// One of this core's CPUs has woken up. When the core was fully idle
// before (every CPU idle), it is non-idle now and the package is notified.
inline void
CoreEntry::CPUWakesUp(CPUEntry* /* cpu */)
{
	// With a single core there is no package-level idleness to track.
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount > 0);

	// Test the pre-decrement value: the core was idle iff every CPU was.
	const bool coreWasIdle = fIdleCPUCount == fCPUCount;
	fIdleCPUCount--;
	if (coreWasIdle)
		fPackage->CoreWakesUp(this);
}
// Map a logical CPU index to the core that owns it (via gCPUEntries).
inline CoreEntry*
CoreEntry::GetCore(int32 cpu)
{
SCHEDULER_ENTER_FUNCTION();
return gCPUEntries[cpu].Core();
}
// Return the index-th entry counted from the tail of the idle-core list
// (the most recently idled core is index 0), or NULL when the list holds
// fewer than index + 1 cores.
inline CoreEntry*
PackageEntry::GetIdleCore(int32 index) const
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = fIdleCores.Last();
	int32 remaining = index;
	while (core != NULL && remaining > 0) {
		core = fIdleCores.GetPrevious(core);
		remaining--;
	}

	return core;
}
// Scan all packages for the one with the most idle cores. Returns NULL
// when no package has any idle core at all.
inline PackageEntry*
PackageEntry::GetMostIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* best = &gPackageEntries[0];
	for (int32 i = 1; i < gPackageCount; i++) {
		PackageEntry* candidate = &gPackageEntries[i];
		if (candidate->fIdleCoreCount > best->fIdleCoreCount)
			best = candidate;
	}

	// Even the best package has no idle core — report none.
	if (best->fIdleCoreCount == 0)
		return NULL;

	return best;
}
// Scan all packages for the one with the fewest idle cores, ignoring
// packages that have none. Returns NULL when every package is fully busy.
inline PackageEntry*
PackageEntry::GetLeastIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* best = NULL;
	for (int32 i = 0; i < gPackageCount; i++) {
		PackageEntry* candidate = &gPackageEntries[i];
		int32 idleCores = candidate->fIdleCoreCount;

		// Fully busy packages do not qualify.
		if (idleCores == 0)
			continue;

		if (best == NULL || idleCores < best->fIdleCoreCount)
			best = candidate;
	}

	return best;
}
}
#endif