pbe: occurrences of "pbe" across the Linux kernel sources. Most are
struct pbe, the hibernation "page backup entry"; the rest are unrelated
driver fields and macros that happen to share the name.
/* Hibernation restore path (kernel/power): walk the page backup
 * entries and copy each saved page back over the original. */
struct pbe *pbe;

for (pbe = restore_pblist; pbe; pbe = pbe->next)
	copy_page(pbe->orig_address, pbe->address);
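A minimal user-space sketch of the same pattern (names hypothetical;
memcpy stands in for the kernel's copy_page(), which copies exactly
PAGE_SIZE bytes):

	#include <string.h>

	#define PAGE_SIZE 4096

	struct pbe_sketch {
		void *address;       /* where the copy currently lives */
		void *orig_address;  /* where the page must end up */
		struct pbe_sketch *next;
	};

	/* Walk the chain and put every saved page back in place. */
	static void restore_all(struct pbe_sketch *list)
	{
		for (struct pbe_sketch *p = list; p; p = p->next)
			memcpy(p->orig_address, p->address, PAGE_SIZE);
	}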
/* Per-architecture asm-offsets for struct pbe. Every architecture that
 * implements hibernation emits these constants from its asm-offsets.c
 * so that the low-level assembly restore code can walk the pbe list
 * without knowing the C struct layout. Several architectures carry
 * near-identical blocks, so the same constants recur under different
 * macro spellings. */

DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));

OFFSET(HIBERN_PBE_ADDR, pbe, address);
OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
OFFSET(HIBERN_PBE_NEXT, pbe, next);

OFFSET(PBE_ADDRESS, pbe, address);
OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
OFFSET(PBE_NEXT, pbe, next);
DEFINE(PBE_SIZE, sizeof(struct pbe));

OFFSET(pbe_address, pbe, address);
OFFSET(pbe_orig_address, pbe, orig_address);
OFFSET(pbe_next, pbe, next);

DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
DEFINE(PBE_NEXT, offsetof(struct pbe, next));
DEFINE(PBE_SIZE, sizeof(struct pbe));

/* Unrelated to hibernation: a 12-bit hardware descriptor field that
 * happens to be named pbe. */
uint32_t pbe:12;
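For context, DEFINE() and OFFSET() come from the asm-offsets machinery
in include/linux/kbuild.h (quoted from memory, so treat as a sketch):
an asm(".ascii") directive makes the compiler print each constant into
the generated assembly, from which Kbuild produces asm-offsets.h.

	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define BLANK() asm volatile("\n.ascii \"->\"" : : )

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))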
/* lima GPU driver (drivers/gpu/drm/lima): here "pbe" indexes the
 * per-VM array of block tables (bts), the upper level of the GPU page
 * table. Unmapping clears the entry: */
u32 pbe = LIMA_PBE(addr);

vm->bts[pbe].cpu[bte] = 0;

/* Mapping allocates the block table on first use, links its page
 * tables into the page directory, then writes the entry: */
u32 pbe = LIMA_PBE(va);

if (!vm->bts[pbe].cpu) {
	vm->bts[pbe].cpu = dma_alloc_wc(
		/* dev and size arguments not captured in this listing */
		&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->bts[pbe].cpu)
		return -ENOMEM;

	pts = vm->bts[pbe].dma;
	pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
	/* ... */
}

vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
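LIMA_PBE()/LIMA_BTE() just slice a GPU virtual address into a
block-table index and an entry index. A self-contained sketch with
made-up field widths (the real constants live in the driver's
lima_vm.h):

	#include <stdint.h>

	/* Hypothetical layout: 4 KiB pages, 512 entries per page table,
	 * 8 page tables per block table. */
	#define PT_SHIFT	12
	#define PTE_BITS	9
	#define PT_PER_BT_BITS	3
	#define BT_SHIFT	(PT_SHIFT + PTE_BITS + PT_PER_BT_BITS)

	static inline uint32_t pbe_index(uint32_t va)	/* which block table */
	{
		return va >> BT_SHIFT;
	}

	static inline uint32_t bte_index(uint32_t va)	/* entry inside it */
	{
		return (va >> PT_SHIFT) & ((1u << (PTE_BITS + PT_PER_BT_BITS)) - 1);
	}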
/* ocrdma RDMA driver: filling physical buffer lists (PBLs). Each PBE
 * holds one page's DMA address as little-endian lo/hi halves; when a
 * PBL page fills up, pbe is repointed at the start of the next one. */
struct ocrdma_pbe *pbe;

pbe = pbl_tbl->va;
pbe->pa_lo = cpu_to_le32((u32)(buf_addr & PAGE_MASK));
pbe->pa_hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
pbe++;
pbe = (struct ocrdma_pbe *)pbl_tbl->va;	/* next PBL page */

/* A second call site repeats the pattern with precomputed page
 * addresses: */
struct ocrdma_pbe *pbe;

pbe = (struct ocrdma_pbe *)pbl_tbl->va;
pbe->pa_lo = cpu_to_le32(pg_addr);
pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
pbe++;
pbe = (struct ocrdma_pbe *)pbl_tbl->va;	/* next PBL page */
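The lo/hi split is plain 32-bit slicing. A user-space sketch, leaving
out the cpu_to_le32() byte-swapping the driver applies
(lower_32_bits()/upper_32_bits() are the kernel helpers imitated here,
and the type names are hypothetical):

	#include <stdint.h>

	struct pbe64 {
		uint32_t pa_lo;	/* low 32 bits of the DMA address */
		uint32_t pa_hi;	/* high 32 bits */
	};

	static void set_pbe(struct pbe64 *p, uint64_t dma_addr)
	{
		p->pa_lo = (uint32_t)dma_addr;		/* lower_32_bits() */
		p->pa_hi = (uint32_t)(dma_addr >> 32);	/* upper_32_bits() */
	}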
/* qedr RDMA driver: the same idea with generic struct regpair entries.
 * qedr_set_page() picks the slot for the next page inside the current
 * PBL page: */
struct regpair *pbe;

pbe = (struct regpair *)pbl_table->va;
pbe += mr->npages % pbes_in_page;	/* slot for the npages-th entry */
pbe->lo = cpu_to_le32((u32)addr);
pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));

/* Populate loop, advancing to the next PBL page when one fills up: */
struct regpair *pbe;

pbe = (struct regpair *)pbl_tbl->va;
if (!pbe) {
	/* PBL page has no virtual address: bail out */
}
pbe->lo = cpu_to_le32(pg_addr);
pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
pbe++;
pbe = (struct regpair *)pbl_tbl->va;	/* next PBL page */
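Picking the right PBL page and slot is div/mod arithmetic over
pbes_in_page, as in qedr_set_page() above. A sketch with hypothetical
names:

	#include <stddef.h>
	#include <stdint.h>

	struct regpair_s { uint32_t lo, hi; };

	/* Page npages / pbes_per_page, slot npages % pbes_per_page,
	 * holds the npages-th buffer address. */
	static struct regpair_s *pbe_slot(struct regpair_s **pbl_pages,
					  size_t pbes_per_page, size_t npages)
	{
		return &pbl_pages[npages / pbes_per_page]
				 [npages % pbes_per_page];
	}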
/* siw (software iWARP) RDMA driver: the PBL ends in a flexible array
 * of PBEs; the __counted_by(max_buf) annotation tells the compiler and
 * FORTIFY that max_buf bounds the array. */
struct siw_pble pbe[] __counted_by(max_buf);

struct siw_pble *pble = &pbl->pbe[i];

/* allocation sized for num_buf trailing pbe[] elements */
pbl = kzalloc_flex(*pbl, pbe, num_buf);

pble = pbl->pbe;
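The conventional way to size such a trailing-array allocation is
struct_size() from <linux/overflow.h>, which computes sizeof(*pbl) plus
num_buf elements with overflow checking; a sketch against the struct
above (error handling assumed to follow the usual ERR_PTR convention):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Allocate the header plus num_buf trailing struct siw_pble
	 * entries in one zeroed block. */
	pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
	if (!pbl)
		return ERR_PTR(-ENOMEM);
	pbl->max_buf = num_buf;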
/* arm64 BRBE perf driver: decode hardware branch records into the perf
 * branch stack, compacting as filtered records are dropped (entries
 * are written at index nr_filtered, not at the read index i): */
struct perf_branch_entry *pbe = &branch_stack->entries[nr_filtered];

if (!perf_entry_from_brbe_regset(i, pbe, event))
	/* ... */;
if (!filter_branch_record(pbe, branch_sample_type, event_type_mask))
	/* ... */;
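The nr_filtered pattern is standard in-place compaction: one read
index, one write index. A generic user-space sketch (all names
hypothetical; the driver avoids the extra copy by decoding straight
into the output slot):

	#include <stdbool.h>
	#include <stddef.h>

	struct entry { unsigned long from, to; };

	/* Keep only entries accepted by keep(); returns the new count. */
	static size_t compact(struct entry *e, size_t n,
			      bool (*keep)(const struct entry *))
	{
		size_t nr_filtered = 0;

		for (size_t i = 0; i < n; i++)
			if (keep(&e[i]))
				e[nr_filtered++] = e[i];
		return nr_filtered;
	}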
/* include/linux/suspend.h: the page backup entry itself. */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* address of the original */
	struct pbe *next;
};
extern struct pbe *restore_pblist;
struct pbe *restore_pblist;	/* defined in kernel/power/snapshot.c */
/* kernel/power/snapshot.c, highmem restore bookkeeping: record where a
 * highmem page's copy is parked so it can be swapped back later. */
struct highmem_pbe *pbe;

pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
if (!pbe) {
	swsusp_free();
	return ERR_PTR(-ENOMEM);
}
pbe->orig_page = page;
/* the copy lands either in a spare highmem page ... */
pbe->copy_page = tmp;
/* ... or in a lowmem safe page: */
pbe->copy_page = virt_to_page(kaddr);
pbe->next = highmem_pblist;
highmem_pblist = pbe;
/* how many struct pbe records fit in one chain-allocator page */
#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
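chain_alloc() is a bump allocator over a chain of single pages, which
is what LINKED_PAGE_DATA_SIZE refers to. A simplified user-space sketch
of that idea, not the kernel's exact code:

	#include <stdlib.h>

	#define DATA_SIZE 4096

	struct linked_page {
		struct linked_page *next;
		char data[DATA_SIZE];
	};

	struct chain_allocator {
		struct linked_page *chain;
		size_t used;
	};

	/* Carve size bytes (size <= DATA_SIZE) off the current page,
	 * chaining a fresh page when the current one cannot fit it. */
	static void *chain_alloc(struct chain_allocator *ca, size_t size)
	{
		if (!ca->chain || DATA_SIZE - ca->used < size) {
			struct linked_page *lp = calloc(1, sizeof(*lp));

			if (!lp)
				return NULL;
			lp->next = ca->chain;
			ca->chain = lp;
			ca->used = 0;
		}
		ca->used += size;
		return ca->chain->data + ca->used - size;
	}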
/* Lowmem counterpart: when an image page cannot be loaded in place,
 * its data goes to a safe page now and is copied back later by the
 * restore loop shown at the top. */
struct pbe *pbe;

pbe = chain_alloc(ca, sizeof(struct pbe));
if (!pbe) {
	swsusp_free();
	return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
pbe->address = __get_safe_page(ca->gfp_mask);
if (!pbe->address)
	return ERR_PTR(-ENOMEM);
pbe->next = restore_pblist;
restore_pblist = pbe;

return pbe->address;
/* restore_highmem(): swap every parked highmem copy back into place
 * through a single bounce page. */
struct highmem_pbe *pbe = highmem_pblist;

if (!pbe)
	return 0;
/* ... a bounce page "buf" is set up here ... */
while (pbe) {
	swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
	pbe = pbe->next;
}
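swap_two_pages_data() is a three-copy exchange through the bounce page;
a user-space sketch (the kernel version also kmaps both highmem pages
first):

	#include <string.h>

	#define PAGE_SIZE 4096

	/* Exchange two page-sized buffers using a third as scratch. */
	static void swap_pages(void *a, void *b, void *buf)
	{
		memcpy(buf, a, PAGE_SIZE);
		memcpy(a, b, PAGE_SIZE);
		memcpy(b, buf, PAGE_SIZE);
	}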
/* Likely an x86 feature-bit table entry: CPUID leaf 1, EDX bit 31 is
 * PBE ("Pending Break Enable"). */
D(pbe, 31)