AMD64_PAGE_SIZE (amd64 base page size, 4096 bytes; usage in FreeBSD's libkvm crash-dump code)
PML4 = _kvm_malloc(kd, AMD64_PAGE_SIZE);
if (kvm_read2(kd, pa, PML4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) {
	return (-1);		/* cannot read the top-level page table */
}
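These two calls are the PML4 caching step in libkvm's amd64 initvtop path: the kernel's top-level page table is read out of the dump once, so later virtual-to-physical translations can walk it entirely in user space. A minimal self-contained sketch of the same pattern, with a hypothetical read_phys() standing in for kvm_read2():

#include <sys/types.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define	AMD64_PAGE_SIZE	4096		/* assumed: (1 << 12) on amd64 */

/* Hypothetical stand-in for kvm_read2(): "read" one physical page. */
static ssize_t
read_phys(uint64_t pa, void *buf, size_t len)
{
	(void)pa;
	memset(buf, 0, len);		/* demo only: an all-zero page */
	return ((ssize_t)len);
}

static uint64_t *
cache_pml4(uint64_t pa)
{
	uint64_t *pml4;

	pml4 = malloc(AMD64_PAGE_SIZE);
	if (pml4 == NULL)
		return (NULL);
	if (read_phys(pa, pml4, AMD64_PAGE_SIZE) != AMD64_PAGE_SIZE) {
		free(pml4);
		return (NULL);		/* short read: unusable table */
	}
	return (pml4);
}

int
main(void)
{
	uint64_t *pml4 = cache_pml4(0x1000);
	int ok = (pml4 != NULL);

	free(pml4);
	return (ok ? 0 : 1);
}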
return (AMD64_PAGE_SIZE - offset);
return (AMD64_PAGE_SIZE - offset);
return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
return (AMD64_PAGE_SIZE - (pa & AMD64_PAGE_MASK));
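Each of these returns encodes the translation contract used throughout libkvm: on success, report how many bytes are contiguously valid from the translated position to the end of the 4 KiB page, so the caller can split a larger read into per-page chunks. A minimal sketch of that chunking loop, with a hypothetical identity translate() in place of the real vatop routines:

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SIZE	4096
#define	AMD64_PAGE_MASK	(AMD64_PAGE_SIZE - 1)

/* Hypothetical identity translation: va -> pa plus bytes left in page. */
static size_t
translate(uint64_t va, uint64_t *pa)
{
	*pa = va;
	return (AMD64_PAGE_SIZE - (va & AMD64_PAGE_MASK));
}

int
main(void)
{
	uint64_t va = 0xffff800000000f00ULL;
	size_t want = 10000, done = 0;

	while (done < want) {
		uint64_t pa;
		size_t chunk = translate(va + done, &pa);

		if (chunk > want - done)
			chunk = want - done;
		printf("read %zu bytes at pa %#jx\n", chunk, (uintmax_t)pa);
		done += chunk;
	}
	return (0);
}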
#define AMD64_NPTEPG (AMD64_PAGE_SIZE / sizeof(amd64_pte_t))
#define AMD64_PAGE_MASK (AMD64_PAGE_SIZE - 1)
#define AMD64_NPDEPG (AMD64_PAGE_SIZE / sizeof(amd64_pde_t))
#define AMD64_NPDPEPG (AMD64_PAGE_SIZE / sizeof(amd64_pdpe_t))
#define AMD64_NPML4EPG (AMD64_PAGE_SIZE / sizeof(amd64_pml4e_t))
_Static_assert(PAGE_SIZE == AMD64_PAGE_SIZE, "PAGE_SIZE mismatch");
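The listing shows the derived entry counts but not the page size definition itself; in kvm_amd64.h it is built from a 12-bit shift. Since every amd64 paging entry is 8 bytes, each of the four table types holds 512 entries per page, i.e. 9 virtual-address bits are decoded per level. A small self-contained check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t amd64_pte_t;	/* all four amd64 entry types are 64-bit */

#define	AMD64_PAGE_SHIFT	12
#define	AMD64_PAGE_SIZE		(1 << AMD64_PAGE_SHIFT)
#define	AMD64_NPTEPG		(AMD64_PAGE_SIZE / sizeof(amd64_pte_t))

int
main(void)
{
	/* 4096 / 8 = 512 entries per table, so 9 VA bits per level. */
	printf("page size %d, %zu entries per table\n",
	    AMD64_PAGE_SIZE, AMD64_NPTEPG);
	return (0);
}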
return _kvm_map_get(kd, *pa & AMD64_PG_FRAME, AMD64_PAGE_SIZE);
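AMD64_PG_FRAME strips a page-table entry down to its physical frame address (bits 12 through 51), which then keys a page-sized lookup in the address map. A sketch of the masking, assuming the conventional frame-mask value:

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PG_FRAME	0x000ffffffffff000ULL	/* bits 12..51 of a PTE */

int
main(void)
{
	uint64_t pte = 0x8000000123456867ULL;	/* NX + flags + frame bits */

	/* Strip NX (bit 63) and the low flag bits, keep the frame. */
	printf("frame %#jx\n", (uintmax_t)(pte & AMD64_PG_FRAME));
	return (0);
}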
dump_avail_off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);
vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE) == -1) {
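These fragments come from minidump initialization (the second line is the tail of the _kvm_pt_init call): the dump file is laid out as one header page, then the page-rounded message buffer, the dump_avail table, the page bitmap, and finally the sparse page data, so each region's offset is the previous offset plus its page-rounded size. A hedged sketch of that layout arithmetic, with example sizes standing in for the real header fields (the actual file also places the captured page-table pages before the sparse region):

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SIZE	4096
#define	round_page(x)	(((x) + AMD64_PAGE_SIZE - 1) & \
    ~(uint64_t)(AMD64_PAGE_SIZE - 1))

int
main(void)
{
	/* Example sizes; real values come from the minidump header. */
	uint64_t msgbufsize = 65536, dumpavailsize = 512, bitmapsize = 131072;
	uint64_t dump_avail_off, bitmap_off, sparse_off;

	dump_avail_off = AMD64_PAGE_SIZE + round_page(msgbufsize);
	bitmap_off = dump_avail_off + round_page(dumpavailsize);
	sparse_off = bitmap_off + round_page(bitmapsize);
	printf("dump_avail %#jx bitmap %#jx sparse %#jx\n",
	    (uintmax_t)dump_avail_off, (uintmax_t)bitmap_off,
	    (uintmax_t)sparse_off);
	return (0);
}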
ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
return (AMD64_PAGE_SIZE - offset);
ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
return (AMD64_PAGE_SIZE - offset);
ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
if (pread(kd->pmfd, &pt, AMD64_PAGE_SIZE, ofs) !=
    AMD64_PAGE_SIZE) {
	return (0);		/* short read: fail the translation */
}
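Here the version 1 path reads the entire page-table page it just located and indexes the wanted PTE inside it; bits 12 through 20 of the virtual address select one of the 512 entries. A sketch of that indexing step:

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SHIFT	12
#define	AMD64_NPTEPG		512	/* AMD64_PAGE_SIZE / sizeof(pte) */

int
main(void)
{
	uint64_t pt[AMD64_NPTEPG] = { 0 };	/* one page-table page */
	uint64_t va = 0xffff800000204000ULL;

	/* Bits 12..20 of the VA select the PTE within the page. */
	uint64_t pte = pt[(va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1)];

	printf("pte index %ju -> %#jx\n",
	    (uintmax_t)((va >> AMD64_PAGE_SHIFT) & (AMD64_NPTEPG - 1)),
	    (uintmax_t)pte);
	return (0);
}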
ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
return (AMD64_PAGE_SIZE - offset);
ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
return (AMD64_PAGE_SIZE - offset);
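_kvm_pt_find maps a physical page to its file offset inside the sparse page area, returning -1 when the page was not captured; conceptually the offset is the rank of the page's bit in the dump bitmap times the page size. A toy sketch of that lookup with a hypothetical 16-page bitmap:

#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SIZE	4096

/* Hypothetical bitmap: bit i set => physical page i is in the dump. */
static const uint8_t bitmap[] = { 0x05, 0x01 };	/* pages 0, 2, 8 present */

static off_t
pt_find(uint64_t pa, off_t sparse_off)
{
	uint64_t bit = pa / AMD64_PAGE_SIZE;
	uint64_t i, rank = 0;

	if (bit >= sizeof(bitmap) * 8 ||
	    (bitmap[bit / 8] & (1 << (bit % 8))) == 0)
		return (-1);	/* page not captured in the minidump */
	for (i = 0; i < bit; i++)	/* rank = set bits before ours */
		if (bitmap[i / 8] & (1 << (i % 8)))
			rank++;
	return (sparse_off + (off_t)(rank * AMD64_PAGE_SIZE));
}

int
main(void)
{
	printf("page 2 at file offset %jd\n",
	    (intmax_t)pt_find(2 * AMD64_PAGE_SIZE, 0x32000));
	printf("page 1 present? %jd\n",
	    (intmax_t)pt_find(1 * AMD64_PAGE_SIZE, 0x32000));
	return (0);
}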
unsigned int pgsz = AMD64_PAGE_SIZE;
_kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
_kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
va += AMD64_PAGE_SIZE;
pa = _kvm_bit_id_pa(kd, bmindex, AMD64_PAGE_SIZE);
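The bitmap helpers convert between a physical address and its bit index by dividing or multiplying by the page size (the real _kvm_pa_bit_id / _kvm_bit_id_pa pair also bias the index by the dump_avail ranges, which this sketch ignores):

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SIZE	4096

/* Hypothetical equivalents of the _kvm_pa_bit_id / _kvm_bit_id_pa pair. */
static uint64_t pa_bit_id(uint64_t pa) { return (pa / AMD64_PAGE_SIZE); }
static uint64_t bit_id_pa(uint64_t id) { return (id * AMD64_PAGE_SIZE); }

int
main(void)
{
	uint64_t pa = 0x123456000ULL;

	printf("bit %ju -> pa %#jx\n", (uintmax_t)pa_bit_id(pa),
	    (uintmax_t)bit_id_pa(pa_bit_id(pa)));
	return (0);
}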
#define amd64_round_page(x) roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
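roundup2 from sys/param.h rounds up to the next multiple of a power of two, so amd64_round_page is the usual (x + mask) & ~mask idiom. A quick check of the boundary cases:

#include <stdint.h>
#include <stdio.h>

#define	AMD64_PAGE_SIZE	4096
#define	roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))  /* y: power of 2 */

int
main(void)
{
	/* 1 rounds to 4096, 4096 stays 4096, 4097 rounds to 8192. */
	printf("%ju %ju %ju\n",
	    (uintmax_t)roundup2((uint64_t)1, (uint64_t)AMD64_PAGE_SIZE),
	    (uintmax_t)roundup2((uint64_t)4096, (uint64_t)AMD64_PAGE_SIZE),
	    (uintmax_t)roundup2((uint64_t)4097, (uint64_t)AMD64_PAGE_SIZE));
	return (0);
}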
(VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK))
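Both arms of this conditional compute the same value, since AMD64_PAGE_MASK is defined above as AMD64_PAGE_SIZE - 1; the split only mirrors the separate v1 and v2 minidump code paths. A compile-time restatement of that equivalence:

#define	AMD64_PAGE_SIZE	4096
#define	AMD64_PAGE_MASK	(AMD64_PAGE_SIZE - 1)

/* The v1 mask expression and AMD64_PAGE_MASK are identical by definition. */
_Static_assert((AMD64_PAGE_SIZE - 1) == AMD64_PAGE_MASK,
    "v1 and v2 page-offset masks agree");

int
main(void)
{
	return (0);
}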