uint64* pa = (uint64*)a.data;
uint64* pb = (uint64*)b.data;	/* pb mirrors pa; inferred from the comparison below */
return (*pa == *pb);
void mmu_map_page(unsigned int vsid, unsigned long pa, unsigned long va);
phys_addr_t pa, uint8 attributes);
struct pci_attach_args *pa = aux;
pci_chipset_tag_t pc = pa->pa_pc;
pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
if (pci_mapreg_map(pa, RGE_PCI_BAR2, PCI_MAPREG_TYPE_MEM |
if (pci_mapreg_map(pa, RGE_PCI_BAR1, PCI_MAPREG_TYPE_MEM |
if (pci_mapreg_map(pa, RGE_PCI_BAR0, PCI_MAPREG_TYPE_IO,
if (pci_intr_map_msix(pa, 0, &ih) == 0 ||
pci_intr_map_msi(pa, &ih) == 0)
else if (pci_intr_map(pa, &ih) != 0) {
sc->sc_dmat = pa->pa_dmat;
sc->sc_pc = pa->pa_pc;
sc->sc_tag = pa->pa_tag;
if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
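/*
 * A minimal attach sketch tying the calls above together (OpenBSD pci(9)
 * style; "struct mysc", its fields, and MYDEV_PCI_BAR0 are hypothetical
 * stand-ins, while the pci_* calls are the standard interfaces):
 */
int
mydev_attach_sketch(struct mysc *sc, struct pci_attach_args *pa)
{
	pci_intr_handle_t ih;

	/* Wake the device to full power before touching its registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/* Map a BAR; the register type and flags depend on the device. */
	if (pci_mapreg_map(pa, MYDEV_PCI_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0) != 0)
		return (1);

	/* Prefer MSI-X, then MSI, then legacy INTx, as the drivers above do. */
	if (pci_intr_map_msix(pa, 0, &ih) != 0 &&
	    pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0)
		return (1);

	/* Stash the tags; later config-space access needs pc + tag. */
	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	return (0);
}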
uint32_t pa, struct ath_desc *nds, uint64_t tsf,
if ((ands->ds_status1 & AR_Done) == 0 && OS_REG_READ(ah, AR_RXDP) == pa)
uint32_t pa, struct ath_desc *nds, uint64_t tsf,
if ((ands->ds_status1 & AR_Done) == 0 && OS_REG_READ(ah, AR_RXDP) == pa)
uint32_t pa, struct ath_desc *nds, uint64_t tsf,
if ((ands->ds_rxstatus1 & AR_Done) == 0 && OS_REG_READ(ah, AR_RXDP) == pa)
uint32_t pa, struct ath_desc *nds, uint64_t tsf,
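/*
 * Note on the checks above (a sketch of the rationale, consistent across
 * the AR5210/AR5211/AR5212 variants): with a self-linked final RX
 * descriptor the hardware can revisit a descriptor it has already written,
 * so the HAL only treats a descriptor as complete once its done bit is set.
 * If AR_Done is clear and the RX descriptor pointer register (AR_RXDP)
 * still points at this descriptor's physical address pa, the hardware may
 * still own it and processing is deferred.
 */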
struct pci_attach_args *pa = aux;
return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
struct pci_attach_args *pa = aux;
sc->sc_pid = PCI_PRODUCT(pa->pa_id);
sc->sc_pct = pa->pa_pc;
sc->sc_pcitag = pa->pa_tag;
sc->sc_dmat = pa->pa_dmat;
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
if (pci_intr_map_msix(pa, 0, &ih) == 0) {
} else if (pci_intr_map_msi(pa, &ih)) {
if (pci_intr_map(pa, &ih)) {
#define PCI_PRODUCT(pa) pci_get_device(dev)
switch (PCI_PRODUCT(pa->pa_id)) {
struct pci_attach_args *pa = aux;
sc->sc_pct = pa->pa_pc;
sc->sc_pcitag = pa->pa_tag;
sc->sc_dmat = pa->pa_dmat;
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
if (pci_intr_map_msix(pa, 0, &ih) == 0) {
} else if (pci_intr_map_msi(pa, &ih)) {
if (pci_intr_map(pa, &ih)) {
#define PCI_PRODUCT(pa) pci_get_device(dev)
switch (PCI_PRODUCT(pa->pa_id)) {
if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_WL_9560_3) {
struct pci_attach_args *pa = aux;
sc->sc_pct = pa->pa_pc;
sc->sc_pcitag = pa->pa_tag;
sc->sc_dmat = pa->pa_dmat;
memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WPI_PCI_BAR0);
error = pci_mapreg_map(pa, WPI_PCI_BAR0, memtype, 0, &sc->sc_st,
if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
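/*
 * Sketch of the step that typically follows the mapping fallbacks above
 * (OpenBSD style; the handler mydev_intr and the "mydev" label are
 * hypothetical): the mapped handle is bound to a handler at IPL_NET.
 */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    mydev_intr, sc, "mydev");
	if (sc->sc_ih == NULL) {
		printf(": can't establish interrupt\n");
		return;
	}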
#define RTWN_CMD_IQ_EXT_PA_5G(pa) (pa)
point_line_distance(BPoint point, BPoint pa, BPoint pb)
double b = point_point_distance(point, pa);
double c = point_point_distance(pa, pb);
currentDist = fabs(point_line_distance(pa.x, pa.y, pb.x, pb.y,
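/*
 * Sketch (not from the source) of the geometry behind the two overloads
 * above: the distance from point p to the line through a and b is twice
 * the area of triangle (p, a, b) divided by the base length |b - a|.  The
 * BPoint variant can obtain the area from its three pairwise distances via
 * Heron's formula; the coordinate form below uses the cross product
 * directly (needs <math.h>).
 */
static double
point_line_distance_sketch(double px, double py, double ax, double ay,
	double bx, double by)
{
	double dx = bx - ax, dy = by - ay;
	double len = sqrt(dx * dx + dy * dy);

	if (len == 0.0)	/* degenerate segment: fall back to point distance */
		return sqrt((px - ax) * (px - ax) + (py - ay) * (py - ay));
	return fabs(dx * (ay - py) - dy * (ax - px)) / len;
}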
struct pfil_head_args pa;
pa.pa_version = PFIL_VERSION;
pa.pa_flags = PFIL_IN;
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = if_name(ctx->ifc_ifp);
pfil = pfil_head_register(&pa);
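/*
 * Sketch of the lifecycle around the registration above, assuming
 * FreeBSD's pfil(9): pfil_head_register() returns a pfil_head_t through
 * which packets are later pushed (pfil_run_hooks()) and which must be
 * released when the interface goes away.
 */
	if (pfil != NULL) {
		/* ... interface runs; on detach: */
		pfil_head_unregister(pfil);
	}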
vm_addr_align_ok(vm_paddr_t pa, u_long alignment)
return ((pa & (alignment - 1)) == 0);
vm_addr_bound_ok(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t boundary)
return (((pa ^ (pa + size - 1)) & -boundary) == 0);
vm_addr_ok(vm_paddr_t pa, vm_paddr_t size, u_long alignment,
return (vm_addr_align_ok(pa, alignment) &&
vm_addr_bound_ok(pa, size, boundary));
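/*
 * Worked example (a sketch, not from the source) of how the
 * vm_addr_bound_ok() test works, with vm_paddr_t modeled as uint64_t:
 * XOR the first and last byte addresses of the range; any differing bit
 * at or above log2(boundary) means the range crosses a boundary line.
 */
#include <assert.h>
#include <stdint.h>

static int
bound_ok_sketch(uint64_t pa, uint64_t size, uint64_t boundary)
{
	/* -boundary is a mask of the bits at/above the boundary bit. */
	return (((pa ^ (pa + size - 1)) & -boundary) == 0);
}

static void
bound_ok_examples(void)
{
	/* 0xF000..0x10FFF straddles the 64 KiB line at 0x10000. */
	assert(!bound_ok_sketch(0xF000, 0x2000, 0x10000));
	/* 0x10000..0x11FFF stays inside one 64 KiB window. */
	assert(bound_ok_sketch(0x10000, 0x2000, 0x10000));
}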
#define pci_mapreg_map(pa, reg, type, flags, tagp, handlep, basep, sizep, maxsize) \
#define pci_intr_establish(pa, ih, level, func, arg, what) \
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
pa = pb = (char *)a + es;
swap(pa, pb);
pa += es;
r = min(pa - (char *)a, pb - pa);
if ((r = pb - pa) > es)
cmp_ids(const void *pa, const void *pb, void *dp)
Id a = *(Id *)pa;
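/*
 * Sketch of a complete comparator in the shape of cmp_ids() above; the
 * excerpt only shows the first load.  The three-way compare is an
 * assumption (it avoids the overflow pitfall of returning a - b), and the
 * trailing void * is the caller-supplied context of the reentrant qsort
 * variants.
 */
typedef int Id;			/* assumption: Id is an integer handle */

static int
cmp_ids_sketch(const void *pa, const void *pb, void *dp)
{
	Id a = *(Id *)pa;
	Id b = *(Id *)pb;
	(void)dp;		/* context unused in this sketch */
	return (a > b) - (a < b);	/* -1, 0, or 1 without overflow */
}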
status_t (*set_tt)(int which, addr_t pa, size_t len, uint32 perms);
status_t (*load_rp)(addr_t pa);
void (*map_page)(addr_t virtualAddress, addr_t pa, uint32 flags);
set_tt(int which, addr_t pa, size_t len, uint32 perms)
TRACE(("mmu_030:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len, perms));
ttr |= (pa & 0xff000000);
load_rp(addr_t pa)
TRACE(("mmu_030:load_rp(0x%lx)\n", pa));
entry.addr = TA_TO_PREA(((addr_t)pa));
ttr |= (pa & 0xff000000);
load_rp(addr_t pa)
TRACE(("mmu_040:load_rp(0x%lx)\n", pa));
if (pa & ((1 << 9) - 1)) {
page_directory_entry *pr = (page_directory_entry *)pa;
: : "d"(pa));
set_tt(int which, addr_t pa, size_t len, uint32 perms /* NOTUSED */)
TRACE(("mmu_040:set_tt(%d, 0x%lx, 0x%lx, 0x%08lx)\n", which, pa, len, perms));
status_t (*set_tt)(int which, addr_t pa, size_t len, uint32 perms);
status_t (*load_rp)(addr_t pa);
void (*map_page)(addr_t virtualAddress, addr_t pa, uint32 flags);
status_t (*set_tt)(int which, addr_t pa, size_t len, uint32 perms);
status_t (*load_rp)(addr_t pa);
void (*map_page)(addr_t virtualAddress, addr_t pa, uint32 flags);
status_t (*set_tt)(int which, addr_t pa, size_t len, uint32 perms);
status_t (*load_rp)(addr_t pa);
void (*map_page)(addr_t virtualAddress, addr_t pa, uint32 flags);
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
return gARMPagingMethod->MapEarly(args, va, pa, attributes);
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
VMSAv8TranslationMap::TableFromPa(phys_addr_t pa)
return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
VMSAv8TranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes, uint32 memoryType,
", 0x%x, 0x%x)\n", va, pa, attributes, memoryType);
phys_addr_t effectivePa = effectiveVa - va + pa;
VMSAv8TranslationMap::Query(addr_t va, phys_addr_t* pa, uint32* flags)
*pa = 0;
*pa = pte & kPteAddrMask;
uint64_t* TableFromPa(phys_addr_t pa);
TableFromPa(phys_addr_t pa)
return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
TableFromPa(phys_addr_t pa)
return reinterpret_cast<uint64_t*>(KERNEL_PMAP_BASE + pa);
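/*
 * Note (sketch): TableFromPa() relies on a linear physical-memory window:
 * every physical address is mapped at the fixed offset KERNEL_PMAP_BASE,
 * so a page-table page at pa can be read through the pointer
 * (uint64_t *)(KERNEL_PMAP_BASE + pa) without creating a temporary mapping.
 */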
map_page_early(phys_addr_t ptPa, int level, addr_t va, phys_addr_t pa,
atomic_set64((int64*) pte, pa | 0x3);
map_page_early(table, level + 1, va, pa, args);
arch_vm_translation_map_early_map(kernel_args* args, addr_t va, phys_addr_t pa, uint8 attributes)
pa |= VMSAv8TranslationMap::GetMemoryAttr(attributes, 0, true);
map_page_early(ptPa, level, va, pa, args);
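/*
 * Sketch of the recursion above: map_page_early() handles one translation
 * level per call; at intermediate levels it follows (or allocates) the
 * next table and recurses with level + 1, and at the leaf it publishes the
 * entry with atomic_set64(pte, pa | 0x3), where the low bits 0x3 mark the
 * VMSAv8 descriptor as valid (valid bit plus table/page bit).
 */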
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags)
index = pa / sIOSpaceChunkSize;
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
*va = paddr_desc[index].va + pa % sIOSpaceChunkSize;
status_t generic_get_physical_page(phys_addr_t pa, addr_t *va, uint32 flags);
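/*
 * Sketch of the arithmetic above: the physical address space is windowed
 * in sIOSpaceChunkSize-sized chunks, each tracked by one paddr_desc entry
 * holding the kernel va of its mapping.  A lookup splits pa into a chunk
 * index (pa / sIOSpaceChunkSize) and an offset (pa % sIOSpaceChunkSize);
 * e.g. with a hypothetical 64 KiB chunk size, pa = 0x12345 resolves to
 * index 1, offset 0x2345, i.e. paddr_desc[1].va + 0x2345.
 */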
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
return gM68KPagingMethod->MapEarly(args, va, pa, attributes);
map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
for (i = 0; i < NUM_PAGEENT_PER_TBL; i++, pa += B_PAGE_SIZE) {
pt[i].addr = TA_TO_PTEA(pa);
get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags)
return generic_get_physical_page(pa, va, flags);
static status_t get_physical_page_tmap_internal(addr_t pa, addr_t *va, uint32 flags);
m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
put_page_table_entry_in_pgtable(&pt[index], pa, attributes,
addr_t pa;
pa = PRE_TO_TA(pr[index]);
pd = (page_directory_entry *)pa;
pa = PDE_TO_TA(pd[index]);
pt = (page_table_entry *)pa;
pa = PIE_TO_TA(pi[index]);
pt = (page_table_entry *)pa;
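/*
 * Sketch of the walk above: the '030/'040 translation tree is descended
 * one level at a time, with the PRE_TO_TA/PDE_TO_TA/PIE_TO_TA macros
 * stripping the flag bits from a root/directory/indirect entry to recover
 * the next table's address, which is then re-cast to the appropriate entry
 * type (page_directory_entry or page_table_entry) before indexing.
 */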
map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes,
phys_addr_t pa = physicalAddress;
TRACE("040::MapEarly: entry pa 0x%lx va 0x%lx\n", pa, va);
addr_t pa;
pa = PRE_TO_TA(pr[index]);
pd = (page_directory_entry *)pa;
pa = PDE_TO_TA(pd[index]);
pt = (page_table_entry *)pa;
pa = PIE_TO_TA(pi[index]);
pt = (page_table_entry *)pa;
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);
M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
return gPPCPagingMethod->MapEarly(args, va, pa, attributes);
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
PPCPagingMethod460::PutPageTableEntryInTable(&pt[index], pa, attributes,
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
PPCPagingMethodClassic::PutPageTableEntryInTable(&pt[index], pa, attributes,
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
return gX86PagingMethod->MapEarly(args, va, pa, attributes);
X86VMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
X86PagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
pa[8] = {
P = pa[0] + s * (pa[1] + s * (pa[2] +
s * (pa[3] + s * (pa[4] + s * (pa[5] + s * (pa[6] + s * pa[7]))))));
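/*
 * The evaluation above is Horner's scheme: P(s) = pa[0] + s*(pa[1] + ...)
 * needs only 7 multiplies and 7 adds for a degree-7 polynomial.  A generic
 * sketch for n coefficients stored low-order first, matching pa[] above:
 */
static double
horner(const double *coef, int n, double s)
{
	double p = coef[n - 1];

	for (int i = n - 2; i >= 0; i--)
		p = coef[i] + s * p;	/* fold in one coefficient per step */
	return p;
}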
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
pa = pb = (char *)a + es;
swapfunc(pa, pb, es);
pa += es;
d1 = MIN(pa - (char *)a, pb - pa);
d1 = pb - pa;
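/*
 * Context sketch for the pointer dance above (the classic Bentley-McIlroy
 * three-way qsort): pa/pb sweep in from the left and pc/pd from the right,
 * parking elements equal to the pivot at both ends of the array.  After
 * partitioning, MIN(pa - (char *)a, pb - pa) is the byte count of the
 * smaller of the left equal block and the less-than block, and that many
 * bytes are swapped so the equal-to-pivot run ends up in the middle:
 */
	d1 = MIN(pa - (char *)a, pb - pa);
	vecswap(a, pb - d1, d1);	/* move left-end equals to the center */
	d1 = MIN(pd - pc, pn - pd - es);
	vecswap(pb, pn - d1, d1);	/* move right-end equals to the center */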