PFN_UP
*min = PFN_UP(memblock_start_of_DRAM());
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
min = PFN_UP(memblock_start_of_DRAM());
max_pfn = max(max_pfn, PFN_UP(start + size));
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
/*
 * First whole page frame past the kernel image: translate the virtual
 * end-of-image symbol (_end) to a physical address (subtract PAGE_OFFSET,
 * add PHYS_OFFSET) and round up to the next page frame with PFN_UP, so
 * bootmem never hands out a page that overlaps the kernel's tail.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
/*
 * PFN of the first byte of physical RAM: PHYS_OFFSET rounded up to a page
 * frame. NOTE(review): PFN_UP and PFN_DOWN coincide only when PHYS_OFFSET
 * is page-aligned — confirm that alignment guarantee for this platform.
 */
#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
PFN_UP(__pa_symbol(&__nosave_end)));
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
/*
 * PFN corresponding to the start of physical memory (PHYS_OFFSET rounded
 * up to a page frame). NOTE(review): assumes PHYS_OFFSET is page-aligned,
 * in which case PFN_UP is equivalent to PFN_DOWN — verify for this arch.
 */
# define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
return PFN_UP(end);
ARCH_PFN_OFFSET = PFN_UP(ramstart);
if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
/*
 * Page-frame number of the base of physical RAM, i.e. PHYS_OFFSET in
 * page-frame units (rounded up). NOTE(review): PFN_UP vs PFN_DOWN only
 * matters if PHYS_OFFSET is not page-aligned — confirm the alignment.
 */
#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
*min = PFN_UP(memblock_start_of_DRAM());
ram_start_pfn = PFN_UP(memory_start);
unsigned long end_pfn = PFN_UP(start + size);
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
numpages = PFN_UP((unsigned long)__end_rodata) -
start = PFN_UP(addr);
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
max_pfn = PFN_UP(start + size);
min_low_pfn = PFN_UP(phys_ram_base);
end_pfn = PFN_UP(__pa(_end));
start_pfn = PFN_UP(__pa(_end));
min_low_pfn = PFN_UP(__pa(uml_reserved));
min_low_pfn = PFN_UP(__pa(reserve_end));
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
map_size = PFN_UP(tboot->tboot_size);
unsigned long end_pfn = PFN_UP(start + size);
end_pfn = PFN_UP(end);
if (PFN_UP(rmp_end) > max_pfn)
max_rmp_pfn = PFN_UP(rmp_end);
if (pages != (PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr))) {
xen_add_extra_mem(PFN_UP(e->addr), pages);
ram_pages += PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr);
PFN_DOWN(e->addr + e->size) - PFN_UP(e->addr));
end_pfn = PFN_UP(remap->paddr + remap->size);
s_pfn = PFN_UP(entry->addr);
unsigned long end_pfn = PFN_UP(end);
end_pfn = PFN_UP(entry->addr);
pfn_s = PFN_UP(addr);
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
mgid = memory_group_register_static(node, PFN_UP(total_length));
max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
want = PFN_UP(words*sizeof(long));
unsigned int nr_pages = PFN_UP(data_size);
unsigned int nr_pages = PFN_UP(payload_size);
rc = memory_group_register_static(numa_node, PFN_UP(total_len));
ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
uint32_t num_pages = PFN_UP(tbo->base.size);
node->mm_nodes[0].size = PFN_UP(node->base.size);
r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
__entry->pages = PFN_UP(bo->tbo.resource->size);
num_pages = PFN_UP(*size + offset);
if (start > PFN_UP(size))
start -= PFN_UP(size);
return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
bman_res->used_visible_size = PFN_UP(bman_res->base.size);
return bman_res->used_visible_size == PFN_UP(res->size);
err = ttm_bo_kmap(tbo, 0, PFN_UP(lsdc_bo_size(lbo)), &lbo->kmap);
bo->resource->start + PFN_UP(bo->resource->size) < mappable)
ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
u32 page_count = PFN_UP(new_reg->size);
page_count = PFN_UP(new_reg->size);
u32 page_count = PFN_UP(new_reg->size);
page_count = PFN_UP(new_reg->size);
u32 page_count = PFN_UP(new_reg->size);
page_count = PFN_UP(new_reg->size);
u32 page_count = PFN_UP(new_reg->size);
page_count = PFN_UP(new_reg->size);
LINE_COUNT, PFN_UP(new_reg->size));
ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
PFN_UP(nvbo->bo.base.size),
u32 num_pages = PFN_UP(size);
u32 num_pages = PFN_UP(size);
r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
__entry->pages = PFN_UP(bo->tbo.resource->size);
num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
PFN_UP(bo->base.size));
ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
if (page + 1 > PFN_UP(bo->resource->size))
if (num_pages > PFN_UP(res->size))
if ((start_page + num_pages) > PFN_UP(res->size))
if (unlikely(page_offset >= PFN_UP(bo->base.size)))
u32 num_pages = PFN_UP(size);
u32 num_pages = PFN_UP(size);
PFN_UP(node->base.size),
(PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
(PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
(PFN_UP(MKS_GUEST_STAT_INSTANCE_MAX_STATS * \
d.dst_num_pages = PFN_UP(dst->resource->size);
d.src_num_pages = PFN_UP(src->resource->size);
bo->resource->start < PFN_UP(bo->resource->size) &&
buf->places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
info.page_size = PFN_UP(size);
for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
gman->used_gmr_pages -= PFN_UP((*res)->size);
gman->used_gmr_pages -= PFN_UP(res->size);
gman->used_gmr_pages += PFN_UP((*res)->size);
const size_t num_pages_stat = PFN_UP(arg->stat_len);
const size_t num_pages_info = PFN_UP(arg->info_len);
const size_t num_pages_strs = PFN_UP(arg->strs_len);
pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
if (page_offset >= PFN_UP(bo->resource->size) ||
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
node->base.mm_nodes[0].size = PFN_UP(node->base.base.size);
PFN_UP(size));
if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
PFN_UP(gpadl->size));
entry[1].second_entry.num_of_pages = PFN_UP(remaining);
unsigned int pages = PFN_UP(vb->planes[0].length);
n_pages = PFN_UP(fw->size);
size_t count = PFN_UP(size);
npages += PFN_UP(sg_dma_len(sg));
iova_addr += PFN_UP(sg_dma_len(sg));
unsigned int n_pages = PFN_UP(IPU6_MMUV2_TRASH_RANGE);
len = PFN_PHYS(PFN_UP(off + bytes));
psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
return PFN_UP(end) - PFN_DOWN(start);
data_grants += PFN_UP(sg->offset + sg->length);
#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
size_t count = PFN_UP(size);
npages += PFN_UP(sg_dma_len(sg));
iova_addr += PFN_UP(sg_dma_len(sg));
unsigned int n_pages = PFN_UP(IPU_MMUV2_TRASH_RANGE);
n_pages = PFN_UP(size);
return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
npages = PFN_UP(size + (iova & ~PAGE_MASK));
int old_pages = PFN_UP(old_bytes);
int new_pages = PFN_UP(new_bytes);
int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
nr_pages = PFN_UP(size);
*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
return PFN_UP(reg->base);
return PFN_UP(reg->base + reg->size);
return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
npages = PFN_UP(eaddr) - PFN_DOWN(addr);
ulong end_pfn = pageblock_align(PFN_UP(end));
pfn = PFN_UP(res.start);
unsigned long nr_pages, nr_total = PFN_UP(end - start);
if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
*out_start_pfn = PFN_UP(r->base);
cursor = PFN_UP(base);
unsigned long start_pfn = PFN_UP(start);
for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
PFN_UP(start + size - 1));
unsigned long nr_pages = PFN_UP(memory_block_memmap_size());
memmap_pages - PFN_UP(memory_block_memmap_size()));
start_pfn = PFN_UP(start_addr);
unsigned long spfn = PFN_UP(start);
for_each_valid_pfn(pfn, PFN_DOWN(start), PFN_UP(end)) {
unsigned long start_pfn = PFN_UP(start);
end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
page_end = PFN_UP(off + size);
stream->num_pages = PFN_UP(runtime->dma_bytes);
stream->num_pages = PFN_UP(cstream->runtime->dma_bytes);
pcm->params.buffer.pages = PFN_UP(crtd->dma_bytes);
pcm.params.buffer.pages = PFN_UP(runtime->dma_bytes);