#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include "arm_registers.h"
#include "efi_platform.h"
#include "generic_mmu.h"
#include "mmu.h"
#include "serial.h"
#include "smp.h"
#include "aarch64.h"
extern const char* granule_type_str(int tg);
extern void arch_mmu_setup_EL1(uint64 tcr);
extern "C" void arch_enter_kernel(struct kernel_args* kernelArgs,
addr_t kernelEntry, addr_t kernelStackTop, uint32 cpu);
extern void arch_mmu_post_efi_setup(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);
extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);
// Patch boot-loader pointers stored in gKernelArgs so they remain valid in
// the kernel's address space. On this platform only the flattened device
// tree (FDT) pointer needs relocation.
void
arch_convert_kernel_args(void)
{
fix_address(gKernelArgs.arch_args.fdt);
}
// Per-CPU startup path shared by the boot CPU and secondaries: configure
// translation control for EL1 (or EL2 with E2H) and make sure the MMU/cache
// state is sane before entering the kernel.
// NOTE(review): statement order is significant throughout — do not reorder.
void
arm64_common_cpu_startup()
{
uint64 el = arch_exception_level();
bool e2h = false;
if (el == 2) {
// Running at EL2: check for VHE (Virtualization Host Extensions).
uint64 hcr = READ_SPECIALREG(HCR_EL2);
if ((hcr & HCR_E2H) != 0) {
// E2H is set — stay at EL2 in host mode; also set TGE so
// EL0 traps route to EL2, as required for a VHE host.
e2h = true;
WRITE_SPECIALREG(HCR_EL2, hcr | HCR_TGE);
}
}
if (el == 1 || e2h) {
// Either already at EL1, or at EL2 with E2H where the *_EL1
// accessors are redirected to the EL2 registers by hardware.
arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL1));
// 0b11: allow EL0 access to the virtual/physical counter registers.
WRITE_SPECIALREG(CNTKCTL_EL1, 0b11);
} else {
// Plain EL2 without E2H: seed EL1's TCR from the EL2 value, then
// drop to EL1. Caches must be off across the EL transition.
arch_mmu_setup_EL1(READ_SPECIALREG(TCR_EL2));
arch_cache_disable();
_arch_transition_EL2_EL1();
}
arch_cache_enable();
}
// Tear down EFI boot services and transfer control to the kernel.
//
// Steps: fetch the firmware memory map, build the post-EFI kernel page
// tables from it, call ExitBootServices (retrying with a fresh map if the
// map key went stale), finish MMU setup, boot secondary CPUs, and finally
// jump to the kernel entry point on the boot CPU's stack.
//
// kernelEntry - virtual address of the kernel's entry point.
// Does not return; panics on any unrecoverable firmware failure.
void
arch_start_kernel(addr_t kernelEntry)
{
	// Probe call: expected to fail with EFI_BUFFER_TOO_SMALL and report
	// the required buffer size in memoryMapSize.
	size_t memoryMapSize = 0;
	efi_memory_descriptor dummy;
	size_t mapKey;
	size_t descriptorSize;
	uint32_t descriptorVersion;
	if (kBootServices->GetMemoryMap(&memoryMapSize, &dummy, &mapKey,
			&descriptorSize, &descriptorVersion) != EFI_BUFFER_TOO_SMALL) {
		panic("Unable to determine size of system memory map");
	}

	// Allocate twice the reported size: the map can grow between the size
	// query and the fetch (our own allocation may add entries).
	size_t actualMemoryMapSize = memoryMapSize * 2;
	efi_memory_descriptor *memoryMap
		= (efi_memory_descriptor *)kernel_args_malloc(actualMemoryMapSize);

	if (memoryMap == NULL)
		panic("Unable to allocate memory map.");

	memoryMapSize = actualMemoryMapSize;
	if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
			&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
		panic("Unable to fetch system memory map.");
	}

	// Dump the map for debugging and remember where our own loader image
	// lives (EfiLoaderCode) so its symbols can be located later.
	addr_t addr = (addr_t)memoryMap;
	efi_physical_addr loaderCode = 0LL;
	dprintf("System provided memory map:\n");
	for (size_t i = 0; i < memoryMapSize / descriptorSize; i++) {
		// Entries are descriptorSize apart, which may exceed
		// sizeof(efi_memory_descriptor) — hence the manual stride.
		efi_memory_descriptor *entry
			= (efi_memory_descriptor *)(addr + i * descriptorSize);
		dprintf("  phys: 0x%08" PRIx64 "-0x%08" PRIx64
			", virt: 0x%08" PRIx64 "-0x%08" PRIx64
			", size = 0x%08" PRIx64 ", type: %s (%#x), attr: %#" PRIx64 "\n",
			entry->PhysicalStart,
			entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->VirtualStart,
			entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
			entry->NumberOfPages * B_PAGE_SIZE,
			memory_region_type_str(entry->Type), entry->Type,
			entry->Attribute);
		if (entry->Type == EfiLoaderCode)
			loaderCode = entry->PhysicalStart;
	}
	// loaderCode is a uint64_t (efi_physical_addr); "%0lx" was a
	// specifier mismatch on ILP32 targets — use PRIx64 instead.
	dprintf("Efi loader symbols offset: 0x%08" PRIx64 "\n", loaderCode);

	uint64 el = arch_exception_level();
	// el is uint64; "%1lx" was likewise non-portable.
	dprintf("Current Exception Level EL%" B_PRIu64 "\n", el);
	dprintf("TTBR0: %" B_PRIx64 " TTBRx: %" B_PRIx64 " SCTLR: %" B_PRIx64
		" TCR: %" B_PRIx64 "\n",
		arch_mmu_base_register(), arch_mmu_base_register(true),
		_arch_mmu_get_sctlr(), _arch_mmu_get_tcr());

	if (arch_mmu_enabled()) {
		dprintf("MMU Enabled, Granularity %s, bits %d\n",
			granule_type_str(arch_mmu_user_granule()),
			arch_mmu_user_address_bits());
		dprintf("Kernel entry accessibility W: %x R: %x\n",
			arch_mmu_write_access(kernelEntry),
			arch_mmu_read_access(kernelEntry));
	}

	// Build the kernel page tables from the firmware map before we lose
	// access to boot services.
	uint64 ttbr1 = arch_mmu_generate_post_efi_page_tables(
		memoryMapSize, memoryMap, descriptorSize, descriptorVersion);

	dprintf("Calling ExitBootServices. So long, EFI!\n");
	serial_disable();

	// ExitBootServices can fail with a stale mapKey if the map changed
	// since GetMemoryMap; re-fetch and retry until it succeeds.
	while (true) {
		if (kBootServices->ExitBootServices(kImage, mapKey) == EFI_SUCCESS) {
			serial_kernel_handoff();
			dprintf("Unhooked from EFI serial services\n");
			break;
		}

		memoryMapSize = actualMemoryMapSize;
		if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
				&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
			panic("Unable to fetch system memory map.");
		}
	}

	// Firmware is gone; finish MMU setup and reclaim the serial port.
	arch_mmu_post_efi_setup(memoryMapSize, memoryMap,
		descriptorSize, descriptorVersion);

	serial_init();
	serial_enable();

	arm64_common_cpu_startup();

	smp_boot_other_cpus(ttbr1, kernelEntry, (addr_t)&gKernelArgs);

	// Sanity-check that the entry point and boot stack are mapped readable
	// before the one-way jump into the kernel.
	if (arch_mmu_read_access(kernelEntry)
			&& arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start)) {
		arch_enter_kernel(&gKernelArgs, kernelEntry,
			gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size,
			0);
	} else {
		panic("Kernel or Stack memory not accessible\n");
	}
}