PFN_UP
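PFN_UP() rounds a byte count up to whole pages and returns the resulting page count. In the kernel it is defined in include/linux/pfn.h as (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT). Every call site listed below uses it to turn an object's byte size (bo->base.size, res->size, etc.) into a number of pages to map, copy, allocate, or bounds-check. A minimal userspace sketch of the rounding behaviour, assuming a 4 KiB page size (PAGE_SHIFT and PAGE_SIZE here are stand-ins for the kernel's values):

#include <stdio.h>

/* Assumed 4 KiB page size, for illustration only. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Mirrors the kernel's PFN_UP() from include/linux/pfn.h: round the byte
 * count x up to the next page boundary and return the number of pages. */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	printf("%lu\n", PFN_UP(10000UL));	/* 3: two full pages plus a partial one */
	printf("%lu\n", PFN_UP(8192UL));	/* 2: exact multiple, no extra page */
	printf("%lu\n", PFN_UP(0UL));		/* 0: zero bytes need zero pages */
	return 0;
}

Representative call sites: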
ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
uint32_t num_pages = PFN_UP(tbo->base.size);
node->mm_nodes[0].size = PFN_UP(node->base.size);
r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
__entry->pages = PFN_UP(bo->tbo.resource->size);
num_pages = PFN_UP(*size + offset);
if (start > PFN_UP(size))
start -= PFN_UP(size);
return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL | __GFP_NOWARN)) {
bman_res->used_visible_size = PFN_UP(bman_res->base.size);
return bman_res->used_visible_size == PFN_UP(res->size);
r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
__entry->pages = PFN_UP(bo->tbo.resource->size);
num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < PFN_UP(bo->base.size); i++) {
PFN_UP(bo->base.size));
ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter,
if (page + 1 > PFN_UP(bo->resource->size))
if (num_pages > PFN_UP(bo->resource->size))
if ((start_page + num_pages) > PFN_UP(bo->resource->size))
if (unlikely(page_offset >= PFN_UP(bo->base.size)))
if (unlikely(page_offset >= PFN_UP(bo->base.size)))
u32 num_pages = PFN_UP(size);
u32 num_pages = PFN_UP(size);
PFN_UP(node->base.size),
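Several of the hits above (the ttm_bo_kmap() calls) follow the same pattern: map the whole buffer object by passing PFN_UP() of its byte size as the page count. A sketch of that pattern, assuming a hypothetical driver BO type struct my_bo that embeds a struct ttm_buffer_object (the my_* names are illustrative, not taken from any driver above, and the drm/ttm/ttm_bo.h include path assumes a recent kernel):

#include <linux/err.h>
#include <linux/pfn.h>
#include <drm/ttm/ttm_bo.h>

struct my_bo {
	struct ttm_buffer_object tbo;	/* embedded TTM buffer object */
	struct ttm_bo_kmap_obj kmap;	/* kept around for a later ttm_bo_kunmap() */
};

/* Map every page of the object and return the CPU-visible address. */
static void *my_bo_kmap_whole(struct my_bo *bo)
{
	bool is_iomem;
	int r;

	/* Byte size rounded up to whole pages, as in the call sites above. */
	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r)
		return ERR_PTR(r);

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}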