#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/reg.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <net/netisr.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/ifunc.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>
#include <ddb/ddb.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_subr.h>
/* Nonzero while the kernel is still cold-booting. */
int cold = 1;

/* Default data cache line size; may be refined at runtime via sysctl below. */
#ifdef __powerpc64__
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif

/*
 * Whether physical memory is fully direct-mapped.  On 64-bit this starts
 * as -1 (unknown) until pmap bootstrap decides; 32-bit assumes 1.
 */
#ifdef __powerpc64__
int hw_direct_map = -1;
#else
int hw_direct_map = 1;
#endif

#ifdef BOOKE
/* Physical address the kernel was loaded at (Book-E only). */
extern vm_paddr_t kernload;
#endif

extern void *ap_pcpu;

/* Per-CPU areas, page-aligned so each CPU's data begins on its own page. */
struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);

/* Static kernel environment used when no loader-supplied one is present. */
static char init_kenv[2048];

/* Initial trapframe for thread0. */
static struct trapframe frame0;

const char machine[] = "powerpc";
SYSCTL_CONST_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD | CTLFLAG_CAPRD,
    machine, "Machine class");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
    CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t);

static void fake_preload_metadata(void);

long Maxmem = 0;
long realmem = 0;

/*
 * MSR (PSL) templates: kernel mode, user mode, and the user bits that may
 * never be changed from userland.  Set up elsewhere during CPU init.
 */
register_t psl_kernset;
register_t psl_userset;
register_t psl_userstatic;
#ifdef __powerpc64__
/* MSR template for 32-bit user processes on a 64-bit kernel. */
register_t psl_userset32;
#endif

struct kva_md_info kmi;
/*
 * Late (SI_SUB_CPU) startup: start the decrementer, finish BSP CPU setup,
 * announce physical/available memory, initialize the kernel VM submaps and
 * bring up the buffer cache.
 */
static void
cpu_startup(void *dummy)
{

	/* Initialise the decrementer-based clock and per-CPU features. */
	decr_init();
	cpu_setup(PCPU_GET(cpuid));

#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = physmem;

	if (bootverbose)
		printf("available KVA = %zu (%zu MB)\n",
		    virtual_end - virtual_avail,
		    (virtual_end - virtual_avail) / 1048576);

	if (bootverbose) {
		int indx;

		/* phys_avail[] holds (start, end) pairs, zero-terminated. */
		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size1 =
			    phys_avail[indx + 1] - phys_avail[indx];

#ifdef __powerpc64__
			printf("0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
#else
			printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
#endif
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / 1048576);

	/* Set up buffer cache and the pager's buffers. */
	bufinit();
	vm_pager_bufferinit();
}
/* Kernel image boundaries, provided by the linker script. */
extern vm_offset_t __startkernel, __endkernel;
extern unsigned char __bss_start[];
extern unsigned char __sbss_start[];
extern unsigned char __sbss_end[];
extern unsigned char _end[];

/* CPU-family specific early init hooks (AIM vs. Book-E). */
void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);
void booke_cpu_init(void);

#ifdef DDB
static void load_external_symtab(void);
#endif
/*
 * Early machine-dependent boot entry point, called from the locore glue.
 * Digests loader-supplied preload metadata (or fabricates it when booted
 * directly from Open Firmware), sets up thread0 and the BSP's pcpu area,
 * probes the platform and bootstraps the pmap.  Returns the initial stack
 * pointer (16-byte aligned, just below thread0's pcb) for the caller to
 * switch onto.
 *
 * NOTE(review): the statement order here is load-bearing — e.g. curthread
 * must be in r13/r2 before mutex_init(), and SPRG0 must point at the pcpu
 * before pcpu_init().
 */
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct pcpu *pc;
	struct cpuref bsp;
	vm_offset_t startkernel, endkernel;
	char *env;
	bool ofw_bootargs = false;
#ifdef DDB
	bool symbols_provided = false;
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
#endif

	startkernel = __startkernel;
	endkernel = __endkernel;

	/* Trust the metadata pointer only if the magic cookie matches. */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * Clear SBSS and BSS.  Skipped on Book-E — presumably zeroed
	 * earlier in its boot path (TODO confirm).
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	if (mdp != NULL) {
		/* Booted via loader(8): parse the preload metadata. */
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelendphys;

		/*
		 * Metadata pointers are physical; compute the offset to
		 * reach them through the kernel's mapping.
		 */
#ifdef AIM
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}
		preload_initkmdp(true);
		boothowto = MD_FETCH(preload_kmdp, MODINFOMD_HOWTO, int);
		envp = MD_FETCH(preload_kmdp, MODINFOMD_ENVP, char *);
		if (envp != NULL)
			envp += md_offset;
		init_static_kenv(envp, 0);
		/* Prefer a loader-provided device tree if we got none. */
		if (fdt == 0) {
			fdt = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, uintptr_t);
			if (fdt != 0)
				fdt += md_offset;
		}
		/* Loaded modules may extend past the linked kernel end. */
		kernelendphys = MD_FETCH(preload_kmdp, MODINFOMD_KERNEND,
		    vm_offset_t);
		if (kernelendphys != 0)
			kernelendphys += md_offset;
		endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
		ksym_start = MD_FETCH(preload_kmdp, MODINFOMD_SSYM, uintptr_t);
		ksym_end = MD_FETCH(preload_kmdp, MODINFOMD_ESYM, uintptr_t);
		db_fetch_ksymtab(ksym_start, ksym_end, md_offset);
		/* Symbols provided by loader. */
		symbols_provided = true;
#endif
	} else {
		/*
		 * Booted directly (no loader): fabricate minimal metadata
		 * and use the static environment; boot args come from OFW.
		 */
		fake_preload_metadata();
		preload_initkmdp(true);
		init_static_kenv(init_kenv, sizeof(init_kenv));
		ofw_bootargs = true;
	}

	/* Store boot environment state in the Open Firmware glue. */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	init_param1();

	/*
	 * Set up thread0 and install its trapframe, then place its address
	 * in the per-thread register (r13 on 64-bit, r2 on 32-bit) so that
	 * curthread works from here on.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(&thread0));
#else
	__asm __volatile("mr 2,%0" :: "r"(&thread0));
#endif

	/* Initialize mutexes (needs curthread set above). */
	mutex_init();

	/* Install the OF client interface. */
	OF_bootstrap();

#ifdef DDB
	/*
	 * If the loader did not hand us a symbol table, try to load one
	 * from an initrd-style image advertised in /chosen (needs the
	 * direct map to address it).
	 */
	if (!symbols_provided && hw_direct_map)
		load_external_symtab();
#endif

	if (ofw_bootargs)
		ofw_parse_bootargs();

#ifdef AIM
	/* Early I/O map needed before console attach on AIM. */
	pmap_early_io_map_init();
#endif

	/* Initialize the console before printing anything. */
	cninit();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the I-cache is coherent with the copied kernel text. */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/* Identify the platform and pick the boot CPU. */
	platform_probe_and_attach();

	if (platform_smp_get_bsp(&bsp) != 0)
		bsp.cr_cpuid = 0;

	/*
	 * Point SPRG0 at the BSP's pcpu area before pcpu_init() so PCPU_*
	 * accessors work, then fill in the BSP's identity.
	 */
	pc = &__pcpu[bsp.cr_cpuid];
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
	pcpu_init(pc, bsp.cr_cpuid, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	thread0.td_oncpu = bsp.cr_cpuid;
	pc->pc_cpuid = bsp.cr_cpuid;
	pc->pc_hwref = bsp.cr_hwref;

	/* Init KDB and the MMU, then bootstrap the pmap. */
	kdb_init();
	pmap_mmu_init();
	sched_instance_select();
	link_elf_ireloc();
	pmap_bootstrap(startkernel, endkernel);

	/* Switch to the kernel MSR template with interrupts disabled. */
	mtmsr(psl_kernset & ~PSL_EE);

	init_param2(physmem);

	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Place thread0's pcb at the top of its kernel stack, 16-byte
	 * aligned, and zero it.
	 */
	thread0.td_pcb = (struct pcb *)__align_down(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE - sizeof(struct pcb), 16);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/*
	 * Return the initial stack pointer: below the pcb, with room for a
	 * minimal call frame, rounded down to 16-byte alignment per the ABI.
	 */
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}
#ifdef DDB
/*
 * Locate a kernel symbol table supplied initrd-style (as done by
 * kexec/petitboot-type boot environments): the /chosen OF node advertises
 * it via "linux,initrd-start"/"linux,initrd-end".  If that image is a
 * valid ELF file, point the ddb symbol-table globals at its SHT_SYMTAB
 * section and associated string table.
 *
 * Requires the direct map (or, on Book-E, the early I/O map) to reach the
 * image by physical address.  Silently returns on any failure — symbols
 * are a debugging convenience, not a boot requirement.
 *
 * Fix: removed the stray semicolon that followed the closing brace (an
 * empty file-scope declaration, invalid in standard C) and moved the
 * opening brace to its own line per style(9).
 */
static void
load_external_symtab(void)
{
	phandle_t chosen;
	vm_paddr_t start, end;
	pcell_t cell[2];
	ssize_t size;
	u_char *kernelimg;
	u_char *kernelimg_final;
	int i;

	Elf_Ehdr *ehdr;
	Elf_Shdr *shdr;

	vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz,
	    ksym_start_final, kstr_start_final;

	if (!hw_direct_map)
		return;

	chosen = OF_finddevice("/chosen");
	if (chosen <= 0)
		return;

	if (!OF_hasprop(chosen, "linux,initrd-start") ||
	    !OF_hasprop(chosen, "linux,initrd-end"))
		return;

	/* Properties may be 1 or 2 cells wide (32- or 64-bit addresses). */
	size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell));
	if (size == 4)
		start = cell[0];
	else if (size == 8)
		start = (uint64_t)cell[0] << 32 | cell[1];
	else
		return;

	size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell));
	if (size == 4)
		end = cell[0];
	else if (size == 8)
		end = (uint64_t)cell[0] << 32 | cell[1];
	else
		return;

	if (!(end - start > 0))
		return;

	kernelimg_final = (u_char *) PHYS_TO_DMAP(start);
#ifdef	AIM
	kernelimg = kernelimg_final;
#else	/* BOOKE */
	/* Map just the first page to validate the ELF header. */
	kernelimg = (u_char *)pmap_early_io_map(start, PAGE_SIZE);
#endif
	ehdr = (Elf_Ehdr *)kernelimg;

	if (!IS_ELF(*ehdr)) {
#ifdef	BOOKE
		pmap_early_io_unmap(start, PAGE_SIZE);
#endif
		return;
	}

#ifdef	BOOKE
	/* Valid ELF: remap the entire image. */
	pmap_early_io_unmap(start, PAGE_SIZE);
	kernelimg = (u_char *)pmap_early_io_map(start, (end - start));
#endif

	shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff);

	ksym_start = 0;
	ksym_sz = 0;
	ksym_start_final = 0;
	kstr_start = 0;
	kstr_sz = 0;
	kstr_start_final = 0;

	/*
	 * Find the symbol table and its linked string table.  The *_final
	 * addresses are through the permanent direct map; the others are
	 * through the (possibly temporary) mapping used for parsing.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (shdr[i].sh_type == SHT_SYMTAB) {
			ksym_start = (vm_offset_t)(kernelimg +
			    shdr[i].sh_offset);
			ksym_start_final = (vm_offset_t)
			    (kernelimg_final + shdr[i].sh_offset);
			ksym_sz = (vm_offset_t)(shdr[i].sh_size);

			kstr_start = (vm_offset_t)(kernelimg +
			    shdr[shdr[i].sh_link].sh_offset);
			kstr_start_final = (vm_offset_t)
			    (kernelimg_final +
			    shdr[shdr[i].sh_link].sh_offset);
			kstr_sz = (vm_offset_t)
			    (shdr[shdr[i].sh_link].sh_size);
		}
	}

	if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 &&
	    kstr_sz != 0 && ksym_start < kstr_start) {
		ksymtab = ksym_start_final;
		ksymtab_size = ksym_sz;
		kstrtab = kstr_start_final;
		/* Relocation base accounts for a relocated kernel. */
		ksymtab_relbase = (__startkernel - KERNBASE);
	}

#ifdef	BOOKE
	pmap_early_io_unmap(start, (end - start));
#endif
}
#endif
/*
 * Fabricate a minimal preload metadata chain for direct (non-loader)
 * boots, so the rest of the kernel can use the normal preload_* API.
 * Records the kernel's name, type, load address and size, terminated by
 * a pair of zero words.
 */
static void
fake_preload_metadata(void) {
	/* Entries are 32-bit words: tag, length, then padded payload. */
	static uint32_t fake_preload[36] __aligned(8);
	int i = 0;

	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char *)&fake_preload[i], "kernel");
	/* "kernel\0" is 7 bytes -> occupies 2 32-bit words. */
	i += 2;

	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen(preload_kerntype) + 1;
	strcpy((char *)&fake_preload[i], preload_kerntype);
	/* Advance past the payload, rounded up to whole words. */
	i += howmany(fake_preload[i - 1], sizeof(uint32_t));

#ifdef __powerpc64__
	/* Padding word so the 64-bit values below land 8-byte aligned. */
	fake_preload[i++] = 0;
#endif
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/* Null-terminate the metadata list. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void*)fake_preload;
}
/*
 * Flush (write back and invalidate) the data cache lines covering the
 * byte range [ptr, ptr + len).  The range is widened to whole cache
 * lines before issuing one dcbf + sync per line.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	register_t line, misalign;

	/* Round the start address down to a cache-line boundary. */
	line = (uintptr_t)ptr;
	misalign = line & (cacheline_size - 1);
	line -= misalign;

	/* Widen the length to cover whole lines, then flush each one. */
	for (len = roundup2(len + misalign, cacheline_size); len > 0;
	    len -= cacheline_size, line += cacheline_size) {
		__asm __volatile ("dcbf 0,%0" :: "r"(line));
		__asm __volatile ("sync");
	}
}
/*
 * ptrace(2) back-end: redirect a stopped thread by storing the new
 * program counter into the saved SRR0 of its trapframe.  Always
 * succeeds on this architecture.
 */
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

	td->td_frame->srr0 = (register_t)addr;
	return (0);
}
/*
 * Enter a spinlock section: on the first (outermost) entry, disable
 * interrupts, remember the prior MSR, and enter a critical section.
 * Nested entries only bump the count.  The priority nop hints SMT
 * hardware that we are about to spin at raised priority.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		nop_prio_mhigh();
		/* Disable interrupts BEFORE taking the critical section. */
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
/*
 * Leave a spinlock section: on the final (outermost) exit, leave the
 * critical section and restore the MSR (and thus the interrupt state)
 * saved by the matching spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		/* Exit the critical section BEFORE re-enabling interrupts. */
		critical_exit();
		intr_restore(msr);
		nop_prio_medium();
	}
}
extern register_t get_spr(int);
#ifdef DDB
/*
 * DDB "show spr <n>" command: read an arbitrary SPR by patching the
 * SPR-number field of the mfspr instruction inside get_spr() at runtime
 * (self-modifying code), flushing the icache, and calling it.
 */
DB_SHOW_COMMAND(spr, db_show_spr)
{
	register_t spr;
	volatile uint32_t *p;
	int sprno, saved_sprno;

	if (!have_addr)
		return;

	/*
	 * The mfspr encoding splits the SPR number into two swapped 5-bit
	 * halves; swap them here to build the instruction field.
	 */
	saved_sprno = sprno = (intptr_t) addr;
	sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);
	p = (uint32_t *)(void *)&get_spr;
#ifdef __powerpc64__
#if defined(_CALL_ELF) && _CALL_ELF == 2
	/* ELFv2: skip the 2-instruction global entry point prologue. */
	p += 2;
#else
	/* ELFv1: the function symbol is a descriptor; deref for the code. */
	p = *(volatile uint32_t * volatile *)p;
#endif
#endif
	/* Patch the SPR field (bits 11-20) of the instruction. */
	*p = (*p & ~0x001ff800) | (sprno << 11);
	__syncicache(__DEVOLATILE(uint32_t *, p), cacheline_size);

	spr = get_spr(sprno);

	db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno,
	    (unsigned long)spr);
}
/*
 * DDB "show frame [addr]" command: dump the saved register state of a
 * trapframe.  Without an address argument, the current thread's frame
 * is shown.
 */
DB_SHOW_COMMAND(frame, db_show_frame)
{
	struct trapframe *tf;
	long val;
	int idx;

	if (have_addr)
		tf = (struct trapframe *)addr;
	else
		tf = curthread->td_frame;
	db_printf("trap frame %p\n", tf);

	/* General-purpose registers, in both hex and decimal. */
	for (idx = 0; idx < nitems(tf->fixreg); idx++) {
		val = tf->fixreg[idx];
		db_printf(" r%d:\t%#lx (%ld)\n", idx, val, val);
	}

	/* Special registers. */
	val = tf->lr;
	db_printf(" lr:\t%#lx\n", val);
	val = tf->cr;
	db_printf(" cr:\t%#lx\n", val);
	val = tf->xer;
	db_printf(" xer:\t%#lx\n", val);
	val = tf->ctr;
	db_printf(" ctr:\t%#lx (%ld)\n", val, val);
	val = tf->srr0;
	db_printf(" srr0:\t%#lx\n", val);
	val = tf->srr1;
	db_printf(" srr1:\t%#lx\n", val);
	val = tf->exc;
	db_printf(" exc:\t%#lx\n", val);
	val = tf->dar;
	db_printf(" dar:\t%#lx\n", val);

	/* CPU-family-specific fault registers. */
#ifdef AIM
	val = tf->cpu.aim.dsisr;
	db_printf(" dsisr:\t%#lx\n", val);
#else
	val = tf->cpu.booke.esr;
	db_printf(" esr:\t%#lx\n", val);
	val = tf->cpu.booke.dbcr0;
	db_printf(" dbcr0:\t%#lx\n", val);
#endif
}
#endif
#if !defined(__powerpc64__)
/*
 * Hidden local alias for the stack-smashing handler, emitted by the
 * compiler for 32-bit PIC code; simply forwards to __stack_chk_fail().
 */
extern void __stack_chk_fail(void);
void __stack_chk_fail_local(void);

void
__stack_chk_fail_local(void)
{

	__stack_chk_fail();
}
#endif