#include <sys/cdefs.h>
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>
#ifdef __arm__
#include <machine/pte.h>
#endif
#ifdef __HAVE_STATIC_DEVMAP
#define DEVMAP_PADDR_NOTFOUND ((vm_paddr_t)(-1))
static const struct devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;
#define AKVA_DEVMAP_MAX_ENTRIES 32
static struct devmap_entry akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int akva_devmap_idx;
#endif
static vm_offset_t akva_devmap_vaddr = DEVMAP_MAX_VADDR;
#if defined(__aarch64__) || defined(__riscv)
extern int early_boot;
#endif
#ifdef __HAVE_STATIC_DEVMAP
static void
devmap_dump_table(int (*prfunc)(const char *, ...))
{
const struct devmap_entry *pd;
if (devmap_table == NULL || devmap_table[0].pd_size == 0) {
prfunc("No static device mappings.\n");
return;
}
prfunc("Static device mappings:\n");
for (pd = devmap_table; pd->pd_size != 0; ++pd) {
prfunc(" 0x%08jx - 0x%08jx mapped at VA 0x%08jx\n",
(uintmax_t)pd->pd_pa,
(uintmax_t)(pd->pd_pa + pd->pd_size - 1),
(uintmax_t)pd->pd_va);
}
}
/*
 * Print the static device-mapping table to the console.
 */
void
devmap_print_table(void)
{
devmap_dump_table(printf);
}
/*
 * Return the lowest KVA address used by any static device mapping; the
 * region from this address up to DEVMAP_MAX_VADDR is reserved for devmap.
 */
vm_offset_t
devmap_lastaddr(void)
{
	const struct devmap_entry *entry;
	vm_offset_t lowest;

	/*
	 * Entries added via devmap_add_entry() grow downwards from
	 * DEVMAP_MAX_VADDR, so the current allocation point is already
	 * the low-water mark.
	 */
	if (akva_devmap_idx > 0)
		return (akva_devmap_vaddr);

	lowest = DEVMAP_MAX_VADDR;
	if (devmap_table != NULL) {
		for (entry = devmap_table; entry->pd_size != 0; entry++) {
			if (entry->pd_va < lowest)
				lowest = entry->pd_va;
		}
	}
	return (lowest);
}
/*
 * Add an entry to the internal "akva" static devmap table, allocating
 * virtual address space for it downwards from DEVMAP_MAX_VADDR.  Must be
 * called before devmap_bootstrap().
 */
void
devmap_add_entry(vm_paddr_t pa, vm_size_t sz)
{
	struct devmap_entry *entry;

	if (devmap_bootstrap_done)
		panic("devmap_add_entry() after devmap_bootstrap()");

	/* The final slot stays all-zero to terminate the table. */
	if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1))
		panic("AKVA_DEVMAP_MAX_ENTRIES is too small");

	/* On the first dynamic entry, publish our accumulation table. */
	if (akva_devmap_idx == 0)
		devmap_register_table(akva_devmap_entries);

	/*
	 * Allocate virtual address space, growing downwards.  When both
	 * the physical address and the size are section-aligned, keep the
	 * virtual address section-aligned too so a large mapping can be
	 * used; otherwise page alignment suffices.
	 */
	if (((pa | sz) & L1_S_OFFSET) == 0)
		akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
	else
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);

	entry = &akva_devmap_entries[akva_devmap_idx++];
	entry->pd_va = akva_devmap_vaddr;
	entry->pd_pa = pa;
	entry->pd_size = sz;
}
/*
 * Register a platform-provided static device-mapping table, terminated by
 * an all-zero (pd_size == 0) entry.  The table must remain valid for the
 * kernel's lifetime; only a pointer to it is stored.
 */
void
devmap_register_table(const struct devmap_entry *table)
{
devmap_table = table;
}
/*
 * Enter all registered static device mappings into the kernel pmap as
 * read/write device memory.  After this point devmap_add_entry() panics,
 * since the table has already been consumed.
 */
void
devmap_bootstrap(void)
{
	const struct devmap_entry *entry;

	devmap_bootstrap_done = true;

	/* Nothing registered (statically or via devmap_add_entry()). */
	if (devmap_table == NULL)
		return;

	for (entry = devmap_table; entry->pd_size != 0; entry++)
		pmap_preboot_map_attr(entry->pd_pa, entry->pd_va,
		    entry->pd_size, VM_PROT_READ | VM_PROT_WRITE,
		    VM_MEMATTR_DEVICE);
}
/*
 * Look up the kernel virtual address of a physical range [pa, pa + size)
 * within the static device mappings.  Returns NULL when the range is not
 * entirely contained in a single entry.
 */
static void *
devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct devmap_entry *entry;

	if (devmap_table == NULL)
		return (NULL);

	for (entry = devmap_table; entry->pd_size != 0; entry++) {
		if (pa < entry->pd_pa)
			continue;
		if (pa + size > entry->pd_pa + entry->pd_size)
			continue;
		return ((void *)(entry->pd_va + (pa - entry->pd_pa)));
	}
	return (NULL);
}
/*
 * Look up the physical address backing a virtual range [vpva, vpva + size)
 * within the static device mappings.  Returns DEVMAP_PADDR_NOTFOUND when
 * the range is not entirely contained in a single entry.
 */
static vm_paddr_t
devmap_vtop(void * vpva, vm_size_t size)
{
	const struct devmap_entry *entry;
	vm_offset_t va;

	if (devmap_table == NULL)
		return (DEVMAP_PADDR_NOTFOUND);

	va = (vm_offset_t)vpva;
	for (entry = devmap_table; entry->pd_size != 0; entry++) {
		if (va < entry->pd_va)
			continue;
		if (va + size > entry->pd_va + entry->pd_size)
			continue;
		return ((vm_paddr_t)(entry->pd_pa + (va - entry->pd_va)));
	}
	return (DEVMAP_PADDR_NOTFOUND);
}
#endif
/*
 * Map a set of physical memory pages into the kernel virtual address space
 * with device memory attributes.  Convenience wrapper around
 * pmap_mapdev_attr() with VM_MEMATTR_DEVICE.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
return (pmap_mapdev_attr(pa, size, VM_MEMATTR_DEVICE));
}
/*
 * Map a physical range [pa, pa + size) into kernel virtual address space
 * with the given memory attribute, returning a pointer that preserves the
 * sub-page offset of pa.
 *
 * NOTE(review): the if/else chains below are deliberately spliced across
 * #ifdef boundaries — each disabled block removes a branch but leaves the
 * chain well-formed.  Do not re-indent or restructure casually.
 */
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, offset;
#ifdef __HAVE_STATIC_DEVMAP
void * rva;
/* First look in the static mapping table; those are always device mem. */
if ((rva = devmap_ptov(pa, size)) != NULL) {
KASSERT(ma == VM_MEMATTR_DEVICE,
("%s: Non-device mapping for pa %jx (type %x)", __func__,
(uintmax_t)pa, ma));
return (rva);
}
#endif
/* Round the request out to whole pages, remembering the offset. */
offset = pa & PAGE_MASK;
pa = trunc_page(pa);
size = round_page(size + offset);
#ifdef PMAP_MAPDEV_EARLY_SIZE
/*
 * Before the VM system is up, carve VA space downwards from the
 * devmap region instead of using kva_alloc().
 */
if (early_boot) {
akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
va = akva_devmap_vaddr;
KASSERT(va >= (VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE),
("%s: Too many early devmap mappings", __func__));
} else
#endif
#ifdef __aarch64__
/* Prefer alignments that allow 2M block or 64K contiguous mappings. */
if (size >= L2_SIZE && (pa & L2_OFFSET) == 0)
va = kva_alloc_aligned(size, L2_SIZE);
else if (size >= L3C_SIZE && (pa & L3C_OFFSET) == 0)
va = kva_alloc_aligned(size, L3C_SIZE);
else
#endif
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
/* Enter the mapping and restore the caller's sub-page offset. */
pmap_kenter(va, size, pa, ma);
return ((void *)(va + offset));
}
/*
 * Undo a mapping created by pmap_mapdev() or pmap_mapdev_attr(): remove
 * the pmap entries and release the kernel virtual address space.
 */
void
pmap_unmapdev(void *p, vm_size_t size)
{
	vm_offset_t base, off;

#ifdef __HAVE_STATIC_DEVMAP
	/*
	 * Addresses inside a static mapping were never allocated from
	 * kva and must not be unmapped or freed.
	 */
	if (devmap_vtop(p, size) != DEVMAP_PADDR_NOTFOUND)
		return;
#endif

	/* Round back out to the whole pages that were originally mapped. */
	base = (vm_offset_t)p;
	off = base & PAGE_MASK;
	base = trunc_page(base);
	size = round_page(size + off);

	pmap_kremove_device(base, size);
	kva_free(base, size);
}
#ifdef DDB
#ifdef __HAVE_STATIC_DEVMAP
#include <ddb/ddb.h>

/*
 * DDB "show devmap" command: dump the static device-mapping table to the
 * debugger console.  Marked memory-safe since it only reads the table.
 */
DB_SHOW_COMMAND_FLAGS(devmap, db_show_devmap, DB_CMD_MEMSAFE)
{
devmap_dump_table(db_printf);
}
#endif
#endif