sg_dma_address
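Returns the DMA (bus) address of a scatterlist entry; it is only valid after the scatterlist has been successfully mapped with dma_map_sg(). Representative call sites: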
sg_dma_address(sg) = seg.ds_addr;
tmp_sg[sg_indx].pg_map_arr = sg_dma_address(sg);
pages[i++] = cpu_to_be64(sg_dma_address(sg) +
pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
&sg_dma_address(mem), gfp_mask);
*dma_handle = sg_dma_address(&chunk->mem[i]) +
sg_dma_address(&chunk->mem[i]));
db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
pages[i++] = sg_dma_address(sg) +
db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
base = sg_dma_address(sg);
pfn = sg_dma_address(sg) >> PAGE_SHIFT;
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
*dma_handle = sg_dma_address(&chunk->mem[i]) +
ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
sg_dma_address(&chunk->mem[i]));
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
pages[i++] = sg_dma_address(sg) +
cpu_to_le32(sg_dma_address(sg) +
((sg_dma_address(sg) +
srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
u64 dma_addr = sg_dma_address(sg) + sg_offset;
sg_offset = prev_addr - sg_dma_address(sg);
return sg_dma_address(sg);
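All of the call sites above read the bus address back out of a scatterlist entry that has already been mapped for DMA. Below is a minimal sketch of that pattern in a generic driver context; it is an illustration under assumed names (example_map_and_walk(), and a dev_dbg() standing in for whatever a real driver would program into its hardware), not code taken from any of the drivers listed.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Map a scatterlist for DMA, then walk the mapped segments. */
static int example_map_and_walk(struct device *dev,
				struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	/* dma_map_sg() may coalesce entries; iterate over its return value. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg); /* bus address of this segment */
		unsigned int len = sg_dma_len(sg);    /* mapped length of this segment */

		/* A real driver would program addr/len into its hardware here. */
		dev_dbg(dev, "segment %d: %pad len %u\n", i, &addr, len);
	}

	/* Unmap with the original nents, not the coalesced count. */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}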