#include "mmu.h"
#include <boot/platform.h>
#include <boot/stdio.h>
#include <boot/kernel_args.h>
#include <boot/stage2.h>
#include <arch/cpu.h>
#include <arch_kernel.h>
#include <platform/openfirmware/openfirmware.h>
#ifdef __ARM__
#include <arm_mmu.h>
#endif
#include <kernel.h>
#include <board_config.h>
#include <OS.h>
#include <string.h>
// OpenFirmware helpers implemented elsewhere in the loader: query the
// #address-cells / #size-cells properties of a device tree package.
int32 of_address_cells(int package);
int32 of_size_cells(int package);
// Set by the platform CPU detection code when running on an AMCC 440 core.
extern bool gIs440;
// Pins TLB entries covering RAM for the 440; reports back how much space the
// page table and the pinned TLB mapping occupy (outputs via references).
extern status_t arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam,
	size_t &tableSize, size_t &tlbSize);
// NOTE(review): TRACE_MMU is left defined, so TRACE() output is always on —
// presumably intentional for bring-up; confirm before release builds.
#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
#define TRACE_MEMORY_MAP
// Space reserved for the kernel image at the base of the kernel address space.
static const size_t kMaxKernelSize = 0x400000;
// Simple bump allocators for boot-time address space; physical and virtual
// cursors advance in lockstep (see get_next_virtual_address()).
static addr_t sNextPhysicalAddress = kMaxKernelSize;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
// Physical location of the page table, computed in mmu_init().
static void *sPageTable = 0;
// platform_allocate_heap_region() may only hand out its region once.
static bool sHeapRegionAllocated = false;
/*!	Bump-allocates \a size bytes of boot-time virtual address space.
	The physical cursor is advanced by the same amount so the identity-style
	virtual/physical pairing stays in sync.
	\return the start of the reserved virtual range.
*/
static addr_t
get_next_virtual_address(size_t size)
{
	addr_t result = sNextVirtualAddress;

	// Keep both cursors moving together.
	sNextVirtualAddress += size;
	sNextPhysicalAddress += size;

	return result;
}
#if 0
// Currently unused: physical-space twin of get_next_virtual_address().
// Kept disabled (rather than deleted) because the pinned-TLB setup does not
// need separate physical allocations yet; re-enable if that changes.
static addr_t
get_next_physical_address(size_t size)
{
	addr_t address = sNextPhysicalAddress;
	sNextPhysicalAddress += size;
	sNextVirtualAddress += size;
	return address;
}
#endif
/*!	Maps \a size bytes of physical memory at \a physicalAddress into the
	kernel's virtual address space with the given page \a flags.
	Not implemented yet for this platform — any call panics the loader.
	\return the virtual address of the mapping (never reached; stub).
*/
extern "C" addr_t
mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
{
	panic("WRITEME");
	return 0;
}
/*!	Final MMU fixups before handing control to the kernel.
	For this platform nothing needs to be changed; when TRACE_MEMORY_MAP is
	defined the collected physical/virtual range lists in gKernelArgs are
	dumped for debugging.
*/
extern "C" void
mmu_init_for_kernel(void)
{
	TRACE(("mmu_init_for_kernel\n"));

#ifdef TRACE_MEMORY_MAP
	{
		// NOTE(review): the virtual ranges are printed with the physical
		// address format macro; works as long as both are the same width.
		dprintf("phys memory ranges:\n");
		for (uint32 index = 0;
				index < gKernelArgs.num_physical_memory_ranges; index++) {
			dprintf(" base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_memory_range[index].start,
				gKernelArgs.physical_memory_range[index].size);
		}

		dprintf("allocated phys memory ranges:\n");
		for (uint32 index = 0;
				index < gKernelArgs.num_physical_allocated_ranges; index++) {
			dprintf(" base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.physical_allocated_range[index].start,
				gKernelArgs.physical_allocated_range[index].size);
		}

		dprintf("allocated virt memory ranges:\n");
		for (uint32 index = 0;
				index < gKernelArgs.num_virtual_allocated_ranges; index++) {
			dprintf(" base 0x%" B_PRIxPHYSADDR
				", length 0x%" B_PRIxPHYSADDR "\n",
				gKernelArgs.virtual_allocated_range[index].start,
				gKernelArgs.virtual_allocated_range[index].size);
		}
	}
#endif
}
/*!	Discovers the machine's physical RAM via the OpenFirmware device tree
	and registers every region with insert_physical_memory_range().
	Handles both 1-cell and 2-cell address encodings of the "reg" property
	(size is always assumed to be a single cell).
	\param total on success, the summed size of all discovered regions.
	\return B_OK on success, B_ERROR if the memory node or its "reg"
		property cannot be read, or if the cell layout is unsupported.
*/
static status_t
find_physical_memory_ranges(phys_addr_t &total)
{
	int memory = -1;
	int package;
	dprintf("checking for memory...\n");
	// Prefer the instance referenced by /chosen; fall back to "/memory".
	if (of_getprop(gChosen, "memory", &memory, sizeof(int)) == OF_FAILED)
		package = of_finddevice("/memory");
	else
		package = of_instance_to_package(memory);
	if (package == OF_FAILED)
		return B_ERROR;

	total = 0;

	// The root node's cell counts define how "reg" entries are encoded.
	int root = of_finddevice("/");
	int32 regAddressCells = of_address_cells(root);
	int32 regSizeCells = of_size_cells(root);
	if (regAddressCells == OF_FAILED || regSizeCells == OF_FAILED) {
		dprintf("finding base/size length counts failed, assume 32-bit.\n");
		regAddressCells = 1;
		regSizeCells = 1;
	}

	// NOTE: only base crossing 32-bit is handled; > 64-bit is unsupported.
	if (regAddressCells > 2 || regSizeCells > 1) {
		panic("%s: Unsupported OpenFirmware cell count detected.\n"
			"Address Cells: %" B_PRId32 "; Size Cells: %" B_PRId32
			" (CPU > 64bit?).\n", __func__, regAddressCells, regSizeCells);
		return B_ERROR;
	}

	if (regAddressCells == 2) {
		// 64-bit base, 32-bit size entries.
		struct of_region<uint64, uint32> regions[64];
		int count = of_getprop(package, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			count = of_getprop(memory, "reg", regions, sizeof(regions));
		if (count == OF_FAILED)
			return B_ERROR;
		count /= sizeof(regions[0]);

		for (int32 i = 0; i < count; i++) {
			// size is unsigned: only an exact zero marks an empty slot.
			if (regions[i].size == 0) {
				// B_PRId32 matches int32 (plain %ld is wrong off LP64).
				dprintf("%" B_PRId32 ": empty region\n", i);
				continue;
			}
			dprintf("%" B_PRId32 ": base = %" B_PRIu64 ","
				"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

			total += regions[i].size;

			if (insert_physical_memory_range((addr_t)regions[i].base,
					regions[i].size) != B_OK) {
				dprintf("cannot map physical memory range "
					"(num ranges = %" B_PRIu32 ")!\n",
					gKernelArgs.num_physical_memory_ranges);
				return B_ERROR;
			}
		}
		return B_OK;
	}

	// 32-bit base, 32-bit size entries.
	struct of_region<uint32, uint32> regions[64];
	int count = of_getprop(package, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		count = of_getprop(memory, "reg", regions, sizeof(regions));
	if (count == OF_FAILED)
		return B_ERROR;
	count /= sizeof(regions[0]);

	for (int32 i = 0; i < count; i++) {
		if (regions[i].size == 0) {
			dprintf("%" B_PRId32 ": empty region\n", i);
			continue;
		}
		dprintf("%" B_PRId32 ": base = %" B_PRIu32 ","
			"size = %" B_PRIu32 "\n", i, regions[i].base, regions[i].size);

		total += regions[i].size;

		if (insert_physical_memory_range((addr_t)regions[i].base,
				regions[i].size) != B_OK) {
			dprintf("cannot map physical memory range "
				"(num ranges = %" B_PRIu32 ")!\n",
				gKernelArgs.num_physical_memory_ranges);
			return B_ERROR;
		}
	}
	return B_OK;
}
extern "C" void
mmu_init(void* fdt)
{
size_t tableSize, tlbSize;
status_t err;
TRACE(("mmu_init\n"));
phys_addr_t total;
if (find_physical_memory_ranges(total) != B_OK) {
dprintf("Error: could not find physical memory ranges!\n");
return ;
}
dprintf("total physical memory = %" B_PRId64 "MB\n", total / (1024 * 1024));
if (gIs440) {
err = arch_mmu_setup_pinned_tlb_amcc440(total, tableSize, tlbSize);
dprintf("setup_pinned_tlb: 0x%08lx table %zdMB tlb %zdMB\n",
err, tableSize / (1024 * 1024), tlbSize / (1024 * 1024));
} else {
panic("Unknown MMU type!");
return;
}
gKernelArgs.physical_allocated_range[0].start
= gKernelArgs.physical_memory_range[0].start;
gKernelArgs.physical_allocated_range[0].size = tlbSize;
gKernelArgs.num_physical_allocated_ranges = 1;
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
gKernelArgs.virtual_allocated_range[0].size
= tlbSize + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
gKernelArgs.num_virtual_allocated_ranges = 1;
sPageTable = (void *)(tlbSize - tableSize - KERNEL_STACK_SIZE);
TRACE(("page table at 0x%p to 0x%p\n", sPageTable,
(uint8 *)sPageTable + tableSize));
gKernelArgs.cpu_kstack[0].start = (addr_t)(tlbSize - KERNEL_STACK_SIZE);
gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
TRACE(("kernel stack at 0x%Lx to 0x%Lx\n", gKernelArgs.cpu_kstack[0].start,
gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
#ifdef __ARM__
init_page_directory();
gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
(addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
#endif
}
/*!	Allocates \a size bytes (rounded up to whole pages) of boot loader
	address space.
	If *\a _address is non-NULL it is treated as a fixed request and only
	range-checked against the kernel area; otherwise a fresh virtual range
	is reserved and returned through \a _address.
	\return B_OK on success, B_ERROR for an out-of-range fixed request,
		B_NO_MEMORY if no address could be reserved.
*/
extern "C" status_t
platform_allocate_region(void **_address, size_t size, uint8 protection)
{
	TRACE(("platform_allocate_region(&%p, %zd)\n", *_address, size));

	// Round up to a whole number of pages.
	size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;

	if (*_address != NULL) {
		// Fixed request: just validate it lies inside the kernel area.
		addr_t fixed = (addr_t)*_address;
		bool belowBase = fixed < KERNEL_BASE;
		bool aboveLimit = fixed + size >= KERNEL_BASE + kMaxKernelSize;
		if (belowBase || aboveLimit) {
			TRACE(("mmu_allocate in illegal range\n address: %" B_PRIx32
				" KERNELBASE: %" B_PRIx32 " KERNEL_BASE + kMaxKernelSize:"
				" %" B_PRIx32 " address + size : %" B_PRIx32 " \n",
				(uint32)fixed, (uint32)KERNEL_BASE,
				KERNEL_BASE + kMaxKernelSize, (uint32)(fixed + size)));
			return B_ERROR;
		}
		TRACE(("platform_allocate_region: allocated %zd bytes at %08lx\n", size,
			fixed));
		return B_OK;
	}

	// No preference given: reserve the next free virtual range.
	void *allocated = (void *)get_next_virtual_address(size);
	if (allocated == NULL)
		return B_NO_MEMORY;

	TRACE(("platform_allocate_region: allocated %zd bytes at %p\n", size,
		allocated));

	*_address = allocated;
	return B_OK;
}
/*!	Frees a region previously obtained from platform_allocate_region().
	On this platform nothing is unmapped (only the ARM build calls
	mmu_free()); the bump allocator never reclaims space.
	\return always B_OK.
*/
extern "C" status_t
platform_free_region(void *address, size_t size)
{
	TRACE(("platform_free_region(%p, %zd)\n", address, size));
#ifdef __ARM__
	mmu_free(address, size);
#endif
	return B_OK;
}
/*!	Hands out the boot heap region, carved out directly below the page
	table set up in mmu_init(). Only a single allocation is supported;
	subsequent calls fail.
	\param size requested heap size in bytes.
	\param _base on success, the start of the heap region.
	\return \a size on success, B_NO_MEMORY if the region was already taken.
*/
ssize_t
platform_allocate_heap_region(size_t size, void **_base)
{
	// One-shot allocator: the single region can be handed out only once.
	if (sHeapRegionAllocated)
		return B_NO_MEMORY;

	sHeapRegionAllocated = true;

	// Place the heap immediately below the page table.
	*_base = (void *)((uint8 *)sPageTable - size);
	TRACE(("boot heap at 0x%p\n", *_base));
	return size;
}
/*!	Counterpart to platform_allocate_heap_region(). Intentionally a no-op:
	the heap lives inside the pinned TLB region and is reclaimed wholesale
	when the kernel takes over.
*/
void
platform_free_heap_region(void *_base, size_t size)
{
}