#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kexec.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_radix.h>
#include <machine/intr_machdep.h>
#include <machine/kexec.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <x86/apicvar.h>
/*
 * Populate a 4-level (non-LA57) page-table hierarchy rooted at 'root' so
 * that it maps the virtual range [start, start + size).
 *
 * root     - PML4 page (kernel-accessible via DMAP for its children).
 * do_pte   - when true, map with 4K PTEs; when false, map with 2M
 *            superpage PDEs (PG_PS) and never descend to the PT level.
 * identity - when true (2M path only) the physical address equals the
 *            virtual address; when false the physical address is taken
 *            from the live kernel mappings via pmap_kextract().  Note
 *            the 4K PTE path below always uses pmap_kextract().
 * pages    - iterator over the image's pre-reserved vm_pages; each
 *            missing intermediate table page is drawn from it in order.
 *            The caller must have reserved enough pages (see
 *            kexec_md_pages()).
 */
static void
kexec_generate_page_tables(pml4_entry_t *root, vm_offset_t start,
    vm_size_t size, bool do_pte, bool identity, struct pctrie_iter *pages)
{
	vm_paddr_t mpa;
	vm_offset_t pg;
	vm_size_t stride = do_pte ? PAGE_SIZE : NBPDR;
	vm_page_t m;
	vm_pindex_t i, j, k, l;

	/* Align the mapping cursor down to the chosen granularity. */
	pg = start & ~(stride - 1);
	/* Initial table index at each level for the first address. */
	i = pmap_pml4e_index(pg);
	j = pmap_pdpe_index(pg);
	k = pmap_pde_index(pg);
	l = pmap_pte_index(pg);
	/*
	 * Walk all four levels.  Lower-level indices are reset to 0
	 * whenever a higher-level index advances, so only the very first
	 * iteration starts mid-table.
	 */
	for (; pg < start + size; i++, j = 0, k = 0, l = 0) {
		if (root[i] == 0) {
			/* Allocate a fresh PDP page from the reserve. */
			m = vm_radix_iter_next(pages);
			mpa = VM_PAGE_TO_PHYS(m);
			root[i] = mpa | PG_RW | PG_V;
		}
		pdp_entry_t *pdp =
		    (pdp_entry_t *)(PHYS_TO_DMAP(root[i] & PG_FRAME));
		for (; j < NPDPEPG && pg < start + size; j++, k = 0, l = 0) {
			if (pdp[j] == 0) {
				/* Allocate a fresh PD page. */
				m = vm_radix_iter_next(pages);
				mpa = VM_PAGE_TO_PHYS(m);
				pdp[j] = mpa | PG_RW | PG_V;
			}
			pd_entry_t *pde =
			    (pd_entry_t *)(PHYS_TO_DMAP(pdp[j] & PG_FRAME));
			for (; k < NPDEPG && pg < start + size; k++, l = 0) {
				if (pde[k] == 0) {
					if (!do_pte) {
						/*
						 * 2M superpage: identity or
						 * kernel-translated mapping.
						 */
						pde[k] =
						    (identity ? pg : pmap_kextract(pg)) |
						    PG_RW | PG_PS | PG_V;
						pg += NBPDR;
						continue;
					}
					/* Allocate a fresh PT page. */
					m = vm_radix_iter_next(pages);
					mpa = VM_PAGE_TO_PHYS(m);
					pde[k] = mpa | PG_V | PG_RW;
				} else if ((pde[k] & PG_PS) != 0) {
					/*
					 * Range already covered by an
					 * existing superpage; skip it.
					 */
					pg += NBPDR;
					continue;
				}
				/*
				 * Fill 4K PTEs, translating each page
				 * through the current kernel pmap.
				 */
				for (; l < NPTEPG && pg < start + size;
				    l++, pg += PAGE_SIZE) {
					pt_entry_t *pte =
					    (pt_entry_t *)PHYS_TO_DMAP(pde[pmap_pde_index(pg)] & PG_FRAME);
					pte[pmap_pte_index(pg)] =
					    pmap_kextract(pg) | PG_RW | PG_V;
				}
			}
		}
	}
}
/*
 * Machine-dependent final reboot step: quiesce interrupt delivery, then
 * jump through the trampoline with the physical address of the new page
 * tables' root page.  Does not return.
 */
void
kexec_reboot_md(struct kexec_image *image)
{
	void (*tramp)(void) = image->md_image;

	/* Stop all interrupt sources before abandoning this kernel. */
	intr_disable_all();
	lapic_disable();

	kexec_do_reboot_trampoline(VM_PAGE_TO_PHYS(image->first_md_page),
	    tramp);

	/* Not reached. */
	while (1)
		continue;
}
/*
 * Machine-dependent image load: build the page tables the reboot
 * trampoline will install, and copy the trampoline code plus a snapshot
 * of the image descriptor into the image's staging map.
 *
 * Returns 0 on success, or EINVAL when 5-level paging (la57) is active,
 * which this path does not support.
 */
int
kexec_load_md(struct kexec_image *image)
{
	struct pctrie_iter pct_iter;
	pml4_entry_t *PT4;
	pdp_entry_t *PDP_l;
	pd_entry_t *PD_l0;
	vm_offset_t va;
	int i;
	vm_offset_t pa_pdp_l, pa_pd_l0, pa_pd_l1, pa_pd_l2, pa_pd_l3;
	vm_page_t m;

	/* Only classic 4-level paging is handled below. */
	if (la57)
		return (EINVAL);

	/*
	 * Walk the image's page collection starting at the first
	 * machine-dependent page.  Page 0 becomes the new PML4; its KVA
	 * inside the staging map is derived from the page's pindex.
	 */
	vm_radix_iter_init(&pct_iter, &image->map_obj->rtree);
	m = vm_radix_iter_lookup(&pct_iter, image->first_md_page->pindex);
	va = (vm_offset_t)image->map_addr + ptoa(m->pindex);
	PT4 = (void *)va;
	va += PAGE_SIZE;
	/* Page 1: PDP covering the low 512GB. */
	m = vm_radix_iter_next(&pct_iter);
	pa_pdp_l = VM_PAGE_TO_PHYS(m);
	PDP_l = (void *)va;
	va += PAGE_SIZE;
	/* Pages 2-5: four PDs used to identity-map the low 4GB. */
	m = vm_radix_iter_next(&pct_iter);
	pa_pd_l0 = VM_PAGE_TO_PHYS(m);
	PD_l0 = (void *)va;
	va += PAGE_SIZE;
	m = vm_radix_iter_next(&pct_iter);
	pa_pd_l1 = VM_PAGE_TO_PHYS(m);
	m = vm_radix_iter_next(&pct_iter);
	pa_pd_l2 = VM_PAGE_TO_PHYS(m);
	m = vm_radix_iter_next(&pct_iter);
	pa_pd_l3 = VM_PAGE_TO_PHYS(m);
	/*
	 * Page 6 is held in 'm' across the table-generation calls below;
	 * it receives the trampoline copy at the end of this function.
	 */
	m = vm_radix_iter_next(&pct_iter);

	/* Wire up the fixed top of the hierarchy. */
	PT4[0] = (pml4_entry_t)pa_pdp_l | PG_V | PG_RW;
	PDP_l[0] = (pdp_entry_t)pa_pd_l0 | PG_V | PG_RW;
	PDP_l[1] = (pdp_entry_t)pa_pd_l1 | PG_V | PG_RW;
	PDP_l[2] = (pdp_entry_t)pa_pd_l2 | PG_V | PG_RW;
	PDP_l[3] = (pdp_entry_t)pa_pd_l3 | PG_V | PG_RW;
	/*
	 * Identity-map the first 4GB with 2MB superpages, writing all
	 * four PD pages through PD_l0.  NOTE(review): this assumes the
	 * four PD pages have consecutive pindexes so they are virtually
	 * contiguous in the staging map — confirm against the MI
	 * allocation code.
	 */
	for (i = 0; i < 4 * NPDEPG; i++) {
		PD_l0[i] = ((pd_entry_t)i << PDRSHIFT) | PG_V |
		    PG_RW | PG_PS;
	}
	/*
	 * Identity-map each target segment with 2MB pages; an empty
	 * segment terminates the list.
	 */
	for (i = 0; i < KEXEC_SEGMENT_MAX; i++) {
		struct kexec_segment_stage *s = &image->segments[i];

		if (s->size == 0)
			break;
		kexec_generate_page_tables(PT4, s->target, s->size, false,
		    true, &pct_iter);
	}
	/*
	 * Map the staging area and the trampoline entry page with 4K
	 * pages translated through the current kernel pmap.
	 */
	kexec_generate_page_tables(PT4, image->map_addr, image->map_size, true,
	    false, &pct_iter);
	kexec_generate_page_tables(PT4,
	    trunc_page((vm_offset_t)kexec_do_reboot_trampoline),
	    PAGE_SIZE, true, false, &pct_iter);

	/* Copy the trampoline code into the page reserved in 'm' above. */
	KASSERT(m != NULL, ("kexec_load_md: Missing trampoline page!\n"));
	image->md_image = (void *)(image->map_addr + ptoa(m->pindex));
	bcopy(kexec_do_reboot, image->md_image, kexec_do_reboot_size);
	/*
	 * Stash a copy of the image descriptor next to the trampoline, at
	 * the same offset kexec_saved_image has from kexec_do_reboot, so
	 * the relocated code can locate it after the switch.
	 */
	bcopy(image, (void *)((vm_offset_t)image->md_image +
	    (vm_offset_t)&kexec_saved_image - (vm_offset_t)&kexec_do_reboot),
	    sizeof(*image));
	return (0);
}
/*
 * Return a conservative (upper-bound) count of machine-dependent pages
 * needed for a kexec image with the given segment list: fixed tables,
 * per-segment mapping tables, tables covering the staging map itself,
 * and the trampoline copy.
 */
int
kexec_md_pages(struct kexec_segment *seg_in)
{
	struct kexec_segment *segs = seg_in;
	/* Fixed page-table overhead (PML4, PDPs, PDs, plus slack). */
	vm_size_t npages = 13;
	/* Addresses at or below this are covered by the fixed low map. */
	vm_paddr_t premapped_top = (1UL << 32) - 1;
	vm_size_t src_bytes = 0;
	int i;

	for (i = 0; i < KEXEC_SEGMENT_MAX; i++) {
		vm_offset_t seg_start, seg_end;

		/* An empty segment terminates the list. */
		if (segs[i].memsz == 0)
			break;

		seg_end = round_2mpage((vm_offset_t)segs[i].mem +
		    segs[i].memsz);
		seg_start = trunc_2mpage((vm_offset_t)segs[i].mem);
		/* Ignore the portion already covered by the fixed map. */
		seg_start = max(seg_start, premapped_top + 1);
		src_bytes += round_2mpage(seg_end - seg_start);
		if (seg_end <= premapped_top + 1)
			continue;
		/*
		 * Account for the intermediate table pages needed to
		 * reach this segment, depending on which levels the
		 * range crosses.
		 */
		if (pmap_pml4e_index(seg_end) !=
		    pmap_pml4e_index(premapped_top)) {
			npages++;
			npages += howmany(seg_end - (seg_start & ~PML4MASK),
			    NBPML4);
			npages += howmany(seg_end - (seg_start & ~PDPMASK),
			    NBPDP);
			npages += howmany(seg_end - (seg_start & ~PDRMASK),
			    NBPDR);
		} else if (pmap_pdpe_index(seg_end) !=
		    pmap_pdpe_index(premapped_top)) {
			npages++;
			npages += howmany(seg_end - (seg_start & ~PDPMASK),
			    NBPDP) - 1;
			npages += howmany(seg_end - (seg_start & ~PDRMASK),
			    NBPDR);
		}
	}
	/* Tables needed to map the staged source data... */
	npages += howmany(src_bytes, NBPDR);
	npages += howmany(src_bytes, NBPDP);
	npages += howmany(src_bytes, NBPML4);
	/* ...and to map the md pages themselves. */
	npages += howmany(npages * PAGE_SIZE, NBPDR);
	npages += howmany(npages * PAGE_SIZE, NBPDP);
	npages += howmany(npages * PAGE_SIZE, NBPML4);
	/* Room for the trampoline code copy. */
	npages += howmany(kexec_do_reboot_size, PAGE_SIZE);
	return (npages);
}