#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/kexec.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <machine/armreg.h>
#include <machine/pmap.h>
#include <machine/pte.h>
extern pt_entry_t pagetable_l0_ttbr0_bootstrap[];
extern unsigned long initstack_end[];
void switch_stack(void *, void (*)(void *, void *, struct kexec_image *), void *);
#define SCTLR_EL1_NO_MMU (SCTLR_RES1 | SCTLR_LSMAOE | SCTLR_nTLSMD | \
SCTLR_EIS | SCTLR_TSCXT | SCTLR_EOS)
#define vm_page_offset(m) ((vm_offset_t)(m) - vm_page_base)
/*
 * Rebase a vm_page pointer from the kernel-virtual vm_page_array into its
 * physical-address alias: compute the pointer's byte offset from the array's
 * VA (vm_page_v) and add it to the array's PA (vm_page_p).  This lets the
 * page list be followed once translation is no longer available.
 */
static inline vm_page_t
phys_vm_page(vm_page_t m, vm_offset_t vm_page_v, vm_paddr_t vm_page_p)
{
	vm_offset_t off;

	off = (vm_offset_t)m - vm_page_v;
	return ((vm_page_t)(off + vm_page_p));
}
/*
 * Last stage of the kexec reboot, reached via switch_stack() at this
 * function's *physical* address (see kexec_reboot_md()).  It chains the
 * image's backing pages into a list, turns the MMU off, copies every
 * segment to its target physical address, and jumps to the new kernel's
 * entry point.  Never returns.
 *
 * NOTE(review): arg1/arg2 exist only to match switch_stack()'s callback
 * signature; only the third argument is used.
 */
static void __aligned(16) __dead2
kexec_reboot_bottom( void *arg1 __unused, void *arg2 __unused,
struct kexec_image *image)
{
/* Entry point of the image being booted, taken as a raw physical address. */
void (*e)(void) = (void *)image->entry;
/* VA and PA of vm_page_array, captured while translation still works. */
vm_offset_t vm_page_base = (vm_offset_t)vm_page_array;
vm_paddr_t vm_page_phys = pmap_kextract((vm_offset_t)vm_page_array);
/* Physical alias of the segment array so it stays readable MMU-off. */
struct kexec_segment_stage *phys_segs =
(void *)pmap_kextract((vm_offset_t)&image->segments);
vm_paddr_t from_pa, to_pa;
vm_size_t size;
vm_page_t first, m, mp;
struct pctrie_iter pct_i;
/*
 * Walk the map object's radix tree (which needs the MMU on) and thread
 * the pages into an SLIST via plinks.s.ss, so the copy loop below can
 * follow them without touching the radix tree.  `mp` trails `m` by one
 * iteration; it is only read after `first` has been set, so it is never
 * used uninitialized.
 */
first = NULL;
vm_radix_iter_init(&pct_i, &image->map_obj->rtree);
VM_RADIX_FORALL(m, &pct_i) {
if (first == NULL)
first = m;
else
SLIST_INSERT_AFTER(mp, m, plinks.s.ss);
mp = m;
}
/*
 * Disable the MMU (SCTLR_EL1_NO_MMU presumably leaves SCTLR_M clear —
 * only RES1/EL-exception-sync bits are named).  From here on all
 * pointers are physical addresses.
 */
WRITE_SPECIALREG(sctlr_el1, SCTLR_EL1_NO_MMU);
isb();
/* Copy each staged segment, page by page, to its final location. */
for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
if (phys_segs[i].size == 0)
break;
to_pa = phys_segs[i].target;
for (vm_page_t p = phys_segs[i].first_page;
p != NULL && to_pa - phys_segs[i].target < phys_segs[i].size;
p = SLIST_NEXT(p, plinks.s.ss)) {
/* Rebase the vm_page pointer so its fields can be read MMU-off. */
p = phys_vm_page(p, vm_page_base, vm_page_phys);
from_pa = p->phys_addr;
/* Page already resides at its destination; skip the copy. */
if (p->phys_addr == to_pa) {
to_pa += PAGE_SIZE;
continue;
}
/* Copy one page, one register_t at a time. */
for (size = PAGE_SIZE / sizeof(register_t);
size > 0; --size) {
*(register_t *)to_pa = *(register_t *)from_pa;
to_pa += sizeof(register_t);
from_pa += sizeof(register_t);
}
}
}
/* New kernel text was written with the I-cache stale; invalidate it. */
invalidate_icache();
/* Jump to the new image; it must not return. */
e();
while (1)
;
}
/*
 * Machine-dependent kexec reboot entry: prepare caches and an identity
 * mapping, then transfer control to kexec_reboot_bottom() at its physical
 * address on the bootstrap stack.  Does not return.
 */
void
kexec_reboot_md(struct kexec_image *image)
{
uintptr_t ptr;
register_t reg;
/*
 * Invalidate the D-cache over every segment's target range (via its
 * DMAP alias) so the MMU-off copy in kexec_reboot_bottom() does not
 * fight stale dirty lines.
 */
for (int i = 0; i < KEXEC_SEGMENT_MAX; i++) {
if (image->segments[i].size > 0)
cpu_dcache_inv_range((void *)PHYS_TO_DMAP(image->segments[i].target),
image->segments[i].size);
}
/* Physical address of the final-stage trampoline. */
ptr = pmap_kextract((vm_offset_t)kexec_reboot_bottom);
/* Mask SErrors; nothing can service them past this point. */
serror_disable();
/*
 * Point TTBR0 at the bootstrap page table (pre-loaded with identity
 * mappings by kexec_load_md()) and flush the TLB so the low-address
 * translations take effect.
 */
reg = pmap_kextract((vm_offset_t)pagetable_l0_ttbr0_bootstrap);
set_ttbr0(reg);
cpu_tlb_flushID();
/* Call the trampoline through its physical address on the boot stack. */
typeof(kexec_reboot_bottom) *p = (void *)ptr;
switch_stack((void *)pmap_kextract((vm_offset_t)initstack_end),
p, image);
/* switch_stack() never returns; park the CPU if it somehow does. */
while (1)
;
}
/*
 * Enter one identity (VA == PA) 2 MiB L2 block mapping covering `pa` into
 * the L2 table `l2`.  L2 block descriptors carry their output address in
 * bits [47:21]; pmap_kextract() returns an exact PA, so the low L2_OFFSET
 * bits must be masked off before the attribute bits are merged in —
 * otherwise offset bits leak into attribute/RES0 fields of the descriptor.
 */
static void
kexec_map_l2_block(pt_entry_t *l2, vm_paddr_t pa)
{
	l2[pmap_l2_index(pa)] = ((pa & ~(vm_paddr_t)L2_OFFSET) | L2_BLOCK |
	    ATTR_AF | ATTR_S1_UXN);
}

/*
 * Machine-dependent load step: pre-populate the bootstrap TTBR0 tables with
 * identity mappings for everything kexec_reboot_bottom() touches once it is
 * running at physical addresses — its own text, the bootstrap stack, and the
 * page holding the vm_page_array pointer.  Returns 0 (cannot fail).
 *
 * NOTE(review): each region gets a single 2 MiB block; this assumes none of
 * them straddles an L2 boundary — confirm against the linker layout.
 */
int
kexec_load_md(struct kexec_image *image)
{
	pt_entry_t *pte;

	/*
	 * The bootstrap L2 tables sit two Ln_ENTRIES-sized tables before
	 * the L0 table (layout of the bootstrap page table area).
	 */
	pte = pagetable_l0_ttbr0_bootstrap;
	pte -= (Ln_ENTRIES * 2);

	kexec_map_l2_block(pte, pmap_kextract((vm_offset_t)kexec_reboot_bottom));
	kexec_map_l2_block(pte, pmap_kextract((vm_offset_t)initstack_end));
	kexec_map_l2_block(pte, pmap_kextract((vm_offset_t)&vm_page_array));
	return (0);
}