dummy_page — usage snippets from the Linux kernel source tree
unsigned long dummy_page;
iommu->dummy_page = (unsigned long) page_address(page);
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
free_page(iommu->dummy_page);
iommu->dummy_page = 0UL;
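
This first group is the simplest variant: the dummy page is tracked as a kernel virtual address, zeroed directly, and its physical address derived with __pa() for the hardware. A minimal sketch of the whole lifecycle, assuming an illustrative demo_iommu struct (the matches above only show isolated lines of it):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Names prefixed demo_ are illustrative, not from the matches above. */
struct demo_iommu {
        unsigned long dummy_page;       /* kernel virtual address */
        unsigned long dummy_page_pa;    /* physical address given to HW */
};

static int demo_iommu_dummy_init(struct demo_iommu *iommu)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return -ENOMEM;

        iommu->dummy_page = (unsigned long)page_address(page);
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long)__pa(iommu->dummy_page);
        return 0;
}

static void demo_iommu_dummy_fini(struct demo_iommu *iommu)
{
        free_page(iommu->dummy_page);   /* free by virtual address */
        iommu->dummy_page = 0UL;
}

Keeping the page as an unsigned long rather than a struct page * works here because the page is never highmem and never DMA-mapped; the hardware only needs the physical address.
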
struct page *dummy_page = ttm_glob.dummy_read_page;
adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0,
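
The pair above takes a different route: instead of allocating anything, the driver borrows TTM's shared dummy_read_page and only creates a streaming DMA mapping for it (the dma_map_page_attrs() call is truncated in the listing, so its direction and attrs arguments are not visible). A hedged sketch using plain dma_map_page(); the direction chosen here is an assumption:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a borrowed dummy page for the device; illustrative helper. */
static int demo_map_dummy_page(struct device *dev, struct page *dummy_page,
                               dma_addr_t *addr)
{
        /* DMA_BIDIRECTIONAL is an assumption; the matched call is cut off. */
        *addr = dma_map_page(dev, dummy_page, 0, PAGE_SIZE,
                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *addr))
                return -ENOMEM;
        return 0;
}
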
pd->dummy_page = alloc_page(GFP_DMA32);
if (!pd->dummy_page)
pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
clear_page(kmap(pd->dummy_page));
kunmap(pd->dummy_page);
__free_page(pd->dummy_page);
__free_page(pd->dummy_page);
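
The gma500-style group allocates from DMA32, zeroes the page through a temporary kmap()/kunmap() mapping, and encodes its PFN into an "invalid" PTE so stray GPU accesses land on a harmless page; the two identical __free_page() matches are presumably an error path and the regular teardown. A sketch of the shape, with a trivial PTE encoding standing in for psb_mmu_mask_pte():

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/types.h>

struct demo_pd {
        struct page *dummy_page;
        u32 invalid_pte;
};

static int demo_pd_dummy_init(struct demo_pd *pd)
{
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                return -ENOMEM;

        /* Zero through a temporary mapping, as the matched code does. */
        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        /*
         * Hypothetical PTE encoding: just the physical address. GFP_DMA32
         * keeps it below 4 GiB, so it fits in a u32.
         */
        pd->invalid_pte = page_to_pfn(pd->dummy_page) << PAGE_SHIFT;
        return 0;
}

static void demo_pd_dummy_fini(struct demo_pd *pd)
{
        __free_page(pd->dummy_page);
        pd->dummy_page = NULL;
}

On current kernels kmap_local_page() would be preferred over kmap(), but the matched lines use the older interface.
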
struct page *dummy_page;
struct page *dummy_page;
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
if (!omap_dmm->dummy_page) {
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
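
The two bare "struct page *dummy_page;" matches above are field declarations; the OMAP DMM-style lines then show a variant that frees any previously allocated page before reallocating, and records the raw physical address with page_to_phys() instead of creating a DMA mapping. A minimal sketch under those assumptions:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct demo_dmm {
        struct page *dummy_page;
        phys_addr_t dummy_pa;
};

static int demo_dmm_dummy_init(struct demo_dmm *dmm)
{
        /* Replace any page left over from a previous initialisation. */
        if (dmm->dummy_page)
                __free_page(dmm->dummy_page);

        dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!dmm->dummy_page)
                return -ENOMEM;

        dmm->dummy_pa = page_to_phys(dmm->dummy_page);
        return 0;
}
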
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
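
The orphaned "(u32)(rdev->dummy_page.addr >> 12));" lines are the second halves of multi-line WREG32() calls whose first halves did not contain the search term; in the radeon driver these typically program VM protection-fault default-address registers, which take a 4 KiB page frame number (hence >> 12), while INTERRUPT_CNTL2 takes a 256-byte-aligned address (hence >> 8). The same fragments recur after the GART block below. A sketch of the arithmetic only, with placeholder register offsets:

#include <linux/types.h>

#define DEMO_FAULT_DEFAULT_ADDR 0x1000  /* placeholder register offset */
#define DEMO_INTERRUPT_CNTL2    0x2000  /* placeholder register offset */

void demo_wreg32(u32 reg, u32 val);     /* illustrative MMIO write */

static void demo_program_dummy_addr(dma_addr_t addr)
{
        /* Page-frame field: a 4 KiB-aligned address shifted right by 12. */
        demo_wreg32(DEMO_FAULT_DEFAULT_ADDR, (u32)(addr >> 12));

        /* 256-byte-aligned address field: shifted right by 8. */
        demo_wreg32(DEMO_INTERRUPT_CNTL2, (u32)(addr >> 8));
}
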
struct radeon_dummy_page dummy_page;
if (rdev->dummy_page.page)
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
if (rdev->dummy_page.page == NULL)
dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
rdev->dummy_page.entry);
rdev->dummy_page.page;
rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
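
This block is the most complete lifecycle in the listing: allocate the page pre-zeroed from DMA32, map it for streaming DMA, bail out cleanly if the mapping fails, precompute a page-table entry for it (radeon_gart_get_page_entry() in the matches), and point every GART slot at that entry. A condensed sketch that stores the raw DMA address as the entry; the struct is illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct demo_gpu {
        struct device *dev;
        struct page *dummy_page;
        dma_addr_t dummy_addr;
        u64 *gart_entries;              /* CPU copy of the GART table */
        unsigned int num_entries;
};

static int demo_gart_dummy_init(struct demo_gpu *gpu)
{
        unsigned int i;

        if (gpu->dummy_page)            /* already initialised */
                return 0;

        gpu->dummy_page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (!gpu->dummy_page)
                return -ENOMEM;

        gpu->dummy_addr = dma_map_page(gpu->dev, gpu->dummy_page, 0,
                                       PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(gpu->dev, gpu->dummy_addr)) {
                __free_page(gpu->dummy_page);
                gpu->dummy_page = NULL;
                return -ENOMEM;
        }

        /* Point every slot at the dummy page until real pages are bound. */
        for (i = 0; i < gpu->num_entries; i++)
                gpu->gart_entries[i] = gpu->dummy_addr;

        return 0;
}

static void demo_gart_dummy_fini(struct demo_gpu *gpu)
{
        if (!gpu->dummy_page)
                return;

        dma_unmap_page(gpu->dev, gpu->dummy_addr, PAGE_SIZE,
                       DMA_BIDIRECTIONAL);
        __free_page(gpu->dummy_page);
        gpu->dummy_page = NULL;
}

Note that unbinding (the "pages_entry[t] = rdev->dummy_page.entry" matches) is the same store again: a slot is rewritten to the dummy entry rather than cleared, so the GPU always reads from somewhere valid.
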
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
(u32)(rdev->dummy_page.addr >> 12));
WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
struct page *dummy_page = (struct page *)res;
__free_page(dummy_page);
struct page *dummy_page = (struct page *)res;
__free_page(dummy_page);
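
These two identical pairs look like managed-release callbacks: the opaque resource pointer is cast back to the struct page that was registered earlier, then freed. A sketch using devm_add_action_or_reset(), which is one way such a callback gets wired up; the pairing with devres here is an assumption:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static void demo_free_dummy_page(void *res)
{
        struct page *dummy_page = (struct page *)res;

        __free_page(dummy_page);
}

static int demo_setup_dummy_page(struct device *dev)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return -ENOMEM;

        /* Freed automatically when the device is unbound. */
        return devm_add_action_or_reset(dev, demo_free_dummy_page, page);
}
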
struct dummy_page ffcw;
struct dummy_page fcw;
struct dummy_page impdef;
struct dummy_page ffcw;
struct dummy_page fcw;
struct dummy_page impdef;
if (cio2->dummy_page) {
dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
cio2->dummy_page = NULL;
cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
if (!cio2->dummy_page || !cio2->dummy_lop) {
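
The ipu3-cio2 style matches use coherent DMA memory instead of a mapped struct page: dma_alloc_coherent() hands back both a CPU pointer and a DMA handle, and teardown must pass the same size and handle to dma_free_coherent(). The "!cio2->dummy_page || !cio2->dummy_lop" check shows a second coherent buffer allocated the same way; the sketch below covers just the page, with illustrative field names mirroring the ones above:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_csi {
        void *dummy_page;
        dma_addr_t dummy_page_bus_addr;
};

static int demo_csi_alloc_dummy(struct device *dev, struct demo_csi *csi)
{
        csi->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
                                             &csi->dummy_page_bus_addr,
                                             GFP_KERNEL);
        if (!csi->dummy_page)
                return -ENOMEM;
        return 0;
}

static void demo_csi_free_dummy(struct device *dev, struct demo_csi *csi)
{
        if (csi->dummy_page) {
                dma_free_coherent(dev, PAGE_SIZE, csi->dummy_page,
                                  csi->dummy_page_bus_addr);
                csi->dummy_page = NULL;
        }
}
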
void *dummy_page;
mmu_info->dummy_page = pt;
free_page((unsigned long)mmu_info->dummy_page);
void *dummy_page;
mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
if (!mmu->dummy_page)
pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
free_page((unsigned long)mmu->dummy_page);
free_page((unsigned long)mmu->dummy_page);
void *dummy_page;
mmu_info->dummy_page = pt;
free_page((unsigned long)mmu_info->dummy_page);
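
The IPU MMU matches keep the dummy page as a plain kernel pointer from __get_free_page(), convert its physical address to a page-table entry with an address-to-PTE macro (IPU3_ADDR2PTE in the lines above), and use that value to fill otherwise-empty page tables. A sketch with a hypothetical PTE macro standing in for the driver's own:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's IPU3_ADDR2PTE(). */
#define DEMO_ADDR2PTE(addr)     ((u32)((addr) >> PAGE_SHIFT))

struct demo_mmu {
        void *dummy_page;
        u32 dummy_page_pteval;
};

static int demo_mmu_dummy_init(struct demo_mmu *mmu)
{
        mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
        if (!mmu->dummy_page)
                return -ENOMEM;

        mmu->dummy_page_pteval = DEMO_ADDR2PTE(virt_to_phys(mmu->dummy_page));
        return 0;
}

static void demo_mmu_dummy_fini(struct demo_mmu *mmu)
{
        free_page((unsigned long)mmu->dummy_page);
}
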
void *dummy_page;
dummy_page = alloc_page(GFP_KERNEL);
if (!dummy_page)
put_page(dummy_page);
put_page(dummy_page);
dummy_tag_frag.page = dummy_page;
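
In this group the dummy page backs a fragment descriptor (dummy_tag_frag.page above), and put_page() on the error and teardown paths drops the reference that alloc_page() took. A sketch under that assumption; demo_frag and its fields are invented for illustration:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct demo_frag {
        struct page *page;
        unsigned int offset;
        unsigned int size;
};

static int demo_frag_init(struct demo_frag *frag)
{
        struct page *dummy_page = alloc_page(GFP_KERNEL);

        if (!dummy_page)
                return -ENOMEM;

        frag->page = dummy_page;
        frag->offset = 0;
        frag->size = PAGE_SIZE;
        return 0;
}

static void demo_frag_fini(struct demo_frag *frag)
{
        put_page(frag->page);   /* drops the alloc_page() reference */
        frag->page = NULL;
}
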
static struct page *dummy_page;
static void *dummy_page[2];
if (dummy_page[i]) {
free_page((unsigned long)dummy_page[i]);
dummy_page[i] = NULL;
dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!dummy_page[i]) {
return virt_to_page(dummy_page[substream->stream]); /* the same page */
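
The final group is the ALSA pattern: one zeroed page per stream direction, indexed by substream->stream (playback or capture), allocated with get_zeroed_page() and returned from the PCM .page callback so every buffer offset maps to the same silent page, as the "/* the same page */" comment notes. A condensed sketch; struct snd_pcm_substream and the .page callback signature are the real ALSA API, the rest is illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <sound/pcm.h>

static void *dummy_page[2];     /* one zeroed page per stream direction */

static void demo_free_dummy_pages(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (dummy_page[i]) {
                        free_page((unsigned long)dummy_page[i]);
                        dummy_page[i] = NULL;
                }
        }
}

static int demo_alloc_dummy_pages(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!dummy_page[i]) {
                        demo_free_dummy_pages();
                        return -ENOMEM;
                }
        }
        return 0;
}

/* PCM .page callback: every offset resolves to the same zeroed page. */
static struct page *demo_pcm_page(struct snd_pcm_substream *substream,
                                  unsigned long offset)
{
        return virt_to_page(dummy_page[substream->stream]);
}
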