arch/arc/include/asm/cacheflush.h
38
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/arc/include/asm/cacheflush.h
39
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/arm/include/asm/cacheflush.h
321
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
arch/arm/include/asm/cacheflush.h
322
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
arch/arm/include/asm/device.h
10
struct dma_iommu_mapping *mapping;
arch/arm/include/asm/device.h
24
#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
arch/arm/include/asm/dma-iommu.h
29
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
arch/arm/include/asm/dma-iommu.h
32
struct dma_iommu_mapping *mapping);
arch/arm/mm/dma-mapping.c
1002
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1011
iommu_unmap(mapping->domain, iova, size);
arch/arm/mm/dma-mapping.c
1012
__free_iova(mapping, iova, size);
arch/arm/mm/dma-mapping.c
1187
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1197
iova_base = iova = __alloc_iova(mapping, size);
arch/arm/mm/dma-mapping.c
1210
ret = iommu_map(mapping->domain, iova, phys, len, prot,
arch/arm/mm/dma-mapping.c
1221
iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
arch/arm/mm/dma-mapping.c
1222
__free_iova(mapping, iova_base, size);
arch/arm/mm/dma-mapping.c
1368
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1378
dma_addr = __alloc_iova(mapping, len);
arch/arm/mm/dma-mapping.c
1384
ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
arch/arm/mm/dma-mapping.c
1390
__free_iova(mapping, dma_addr, len);
arch/arm/mm/dma-mapping.c
1407
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1417
phys_addr_t phys = iommu_iova_to_phys(mapping->domain, iova);
arch/arm/mm/dma-mapping.c
1422
iommu_unmap(mapping->domain, iova, len);
arch/arm/mm/dma-mapping.c
1423
__free_iova(mapping, iova, len);
arch/arm/mm/dma-mapping.c
1429
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1437
phys = iommu_iova_to_phys(mapping->domain, iova);
arch/arm/mm/dma-mapping.c
1444
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1452
phys = iommu_iova_to_phys(mapping->domain, iova);
arch/arm/mm/dma-mapping.c
1491
struct dma_iommu_mapping *mapping;
arch/arm/mm/dma-mapping.c
1507
mapping = kzalloc_obj(struct dma_iommu_mapping);
arch/arm/mm/dma-mapping.c
1508
if (!mapping)
arch/arm/mm/dma-mapping.c
1511
mapping->bitmap_size = bitmap_size;
arch/arm/mm/dma-mapping.c
1512
mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
arch/arm/mm/dma-mapping.c
1514
if (!mapping->bitmaps)
arch/arm/mm/dma-mapping.c
1517
mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
arch/arm/mm/dma-mapping.c
1518
if (!mapping->bitmaps[0])
arch/arm/mm/dma-mapping.c
1521
mapping->nr_bitmaps = 1;
arch/arm/mm/dma-mapping.c
1522
mapping->extensions = extensions;
arch/arm/mm/dma-mapping.c
1523
mapping->base = base;
arch/arm/mm/dma-mapping.c
1524
mapping->bits = BITS_PER_BYTE * bitmap_size;
arch/arm/mm/dma-mapping.c
1526
spin_lock_init(&mapping->lock);
arch/arm/mm/dma-mapping.c
1528
mapping->domain = iommu_paging_domain_alloc(dev);
arch/arm/mm/dma-mapping.c
1529
if (IS_ERR(mapping->domain)) {
arch/arm/mm/dma-mapping.c
1530
err = PTR_ERR(mapping->domain);
arch/arm/mm/dma-mapping.c
1534
kref_init(&mapping->kref);
arch/arm/mm/dma-mapping.c
1535
return mapping;
arch/arm/mm/dma-mapping.c
1537
kfree(mapping->bitmaps[0]);
arch/arm/mm/dma-mapping.c
1539
kfree(mapping->bitmaps);
arch/arm/mm/dma-mapping.c
1541
kfree(mapping);
arch/arm/mm/dma-mapping.c
1550
struct dma_iommu_mapping *mapping =
arch/arm/mm/dma-mapping.c
1553
iommu_domain_free(mapping->domain);
arch/arm/mm/dma-mapping.c
1554
for (i = 0; i < mapping->nr_bitmaps; i++)
arch/arm/mm/dma-mapping.c
1555
kfree(mapping->bitmaps[i]);
arch/arm/mm/dma-mapping.c
1556
kfree(mapping->bitmaps);
arch/arm/mm/dma-mapping.c
1557
kfree(mapping);
arch/arm/mm/dma-mapping.c
1560
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
arch/arm/mm/dma-mapping.c
1564
if (mapping->nr_bitmaps >= mapping->extensions)
arch/arm/mm/dma-mapping.c
1567
next_bitmap = mapping->nr_bitmaps;
arch/arm/mm/dma-mapping.c
1568
mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
arch/arm/mm/dma-mapping.c
1570
if (!mapping->bitmaps[next_bitmap])
arch/arm/mm/dma-mapping.c
1573
mapping->nr_bitmaps++;
arch/arm/mm/dma-mapping.c
1578
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
arch/arm/mm/dma-mapping.c
1580
if (mapping)
arch/arm/mm/dma-mapping.c
1581
kref_put(&mapping->kref, release_iommu_mapping);
arch/arm/mm/dma-mapping.c
1586
struct dma_iommu_mapping *mapping)
arch/arm/mm/dma-mapping.c
1590
err = iommu_attach_device(mapping->domain, dev);
arch/arm/mm/dma-mapping.c
1594
kref_get(&mapping->kref);
arch/arm/mm/dma-mapping.c
1595
to_dma_iommu_mapping(dev) = mapping;
arch/arm/mm/dma-mapping.c
1615
struct dma_iommu_mapping *mapping)
arch/arm/mm/dma-mapping.c
1619
err = __arm_iommu_attach_device(dev, mapping);
arch/arm/mm/dma-mapping.c
1637
struct dma_iommu_mapping *mapping;
arch/arm/mm/dma-mapping.c
1639
mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1640
if (!mapping) {
arch/arm/mm/dma-mapping.c
1645
iommu_detach_device(mapping->domain, dev);
arch/arm/mm/dma-mapping.c
1646
kref_put(&mapping->kref, release_iommu_mapping);
arch/arm/mm/dma-mapping.c
1656
struct dma_iommu_mapping *mapping;
arch/arm/mm/dma-mapping.c
1663
mapping = arm_iommu_create_mapping(dev, dma_base, size);
arch/arm/mm/dma-mapping.c
1664
if (IS_ERR(mapping)) {
arch/arm/mm/dma-mapping.c
1670
if (__arm_iommu_attach_device(dev, mapping)) {
arch/arm/mm/dma-mapping.c
1673
arm_iommu_release_mapping(mapping);
arch/arm/mm/dma-mapping.c
1682
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
1684
if (!mapping)
arch/arm/mm/dma-mapping.c
1688
arm_iommu_release_mapping(mapping);
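The dma-mapping.c entries above trace the full lifecycle of a dma_iommu_mapping: create (1663), attach (1619/1670), detach (1645), release (1578/1688). A minimal usage sketch, assuming a tree where arm_iommu_create_mapping() takes the device itself, as the 1663 call site suggests; example_setup_iommu() and the IOVA window base/size are hypothetical:

#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_setup_iommu(struct device *dev)	/* hypothetical helper */
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* Reserve an IOVA window for the device (base/size chosen arbitrarily). */
	mapping = arm_iommu_create_mapping(dev, 0x80000000, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Route the device's DMA API operations through the IOMMU domain. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);	/* drops the kref taken at create */
		return ret;
	}

	/*
	 * Teardown mirrors this: arm_iommu_detach_device(dev), then
	 * arm_iommu_release_mapping(mapping).
	 */
	return 0;
}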
arch/arm/mm/dma-mapping.c
752
static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
arch/arm/mm/dma-mapping.c
754
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
arch/arm/mm/dma-mapping.c
760
size_t mapping_size = mapping->bits << PAGE_SHIFT;
arch/arm/mm/dma-mapping.c
771
spin_lock_irqsave(&mapping->lock, flags);
arch/arm/mm/dma-mapping.c
772
for (i = 0; i < mapping->nr_bitmaps; i++) {
arch/arm/mm/dma-mapping.c
773
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
arch/arm/mm/dma-mapping.c
774
mapping->bits, 0, count, align);
arch/arm/mm/dma-mapping.c
776
if (start > mapping->bits)
arch/arm/mm/dma-mapping.c
779
bitmap_set(mapping->bitmaps[i], start, count);
arch/arm/mm/dma-mapping.c
788
if (i == mapping->nr_bitmaps) {
arch/arm/mm/dma-mapping.c
789
if (extend_iommu_mapping(mapping)) {
arch/arm/mm/dma-mapping.c
790
spin_unlock_irqrestore(&mapping->lock, flags);
arch/arm/mm/dma-mapping.c
794
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
arch/arm/mm/dma-mapping.c
795
mapping->bits, 0, count, align);
arch/arm/mm/dma-mapping.c
797
if (start > mapping->bits) {
arch/arm/mm/dma-mapping.c
798
spin_unlock_irqrestore(&mapping->lock, flags);
arch/arm/mm/dma-mapping.c
802
bitmap_set(mapping->bitmaps[i], start, count);
arch/arm/mm/dma-mapping.c
804
spin_unlock_irqrestore(&mapping->lock, flags);
arch/arm/mm/dma-mapping.c
806
iova = mapping->base + (mapping_size * i);
arch/arm/mm/dma-mapping.c
812
static inline void __free_iova(struct dma_iommu_mapping *mapping,
arch/arm/mm/dma-mapping.c
816
size_t mapping_size = mapping->bits << PAGE_SHIFT;
arch/arm/mm/dma-mapping.c
824
bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
arch/arm/mm/dma-mapping.c
825
BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
arch/arm/mm/dma-mapping.c
827
bitmap_base = mapping->base + mapping_size * bitmap_index;
arch/arm/mm/dma-mapping.c
842
spin_lock_irqsave(&mapping->lock, flags);
arch/arm/mm/dma-mapping.c
843
bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
arch/arm/mm/dma-mapping.c
844
spin_unlock_irqrestore(&mapping->lock, flags);
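The __alloc_iova()/__free_iova() entries above implement a per-mapping bitmap allocator: each bitmap covers mapping->bits pages, extend_iommu_mapping() appends another bitmap when the existing ones are full, and the chunk base is computed at 806. A worked example of the address arithmetic with illustrative numbers (the start-based page-offset term is assumed to be added after the chunk base shown at 806; none of these values come from a call site):

/*
 * With base = 0x80000000 and bits = 0x10000 (so, for 4 KiB pages,
 * mapping_size = bits << PAGE_SHIFT = 256 MiB), a free run found at
 * bit start = 0x20 of bitmap i = 1 yields
 *
 *	iova = base + mapping_size * i + (start << PAGE_SHIFT)
 *	     = 0x80000000 + 0x10000000 + 0x20000
 *	     = 0x90020000
 *
 * and __free_iova() inverts the chunk lookup (see 824) with
 *
 *	bitmap_index = (iova - base) / mapping_size = 1.
 */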
arch/arm/mm/dma-mapping.c
963
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
arch/arm/mm/dma-mapping.c
968
dma_addr = __alloc_iova(mapping, size);
arch/arm/mm/dma-mapping.c
985
ret = iommu_map(mapping->domain, iova, phys, len,
arch/arm/mm/dma-mapping.c
995
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
arch/arm/mm/dma-mapping.c
996
__free_iova(mapping, dma_addr, size);
arch/arm/mm/fault-armv.c
124
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
arch/arm/mm/fault-armv.c
142
flush_dcache_mmap_lock(mapping);
arch/arm/mm/fault-armv.c
143
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
arch/arm/mm/fault-armv.c
169
flush_dcache_mmap_unlock(mapping);
arch/arm/mm/fault-armv.c
191
struct address_space *mapping;
arch/arm/mm/fault-armv.c
205
mapping = folio_flush_mapping(folio);
arch/arm/mm/fault-armv.c
207
__flush_dcache_folio(mapping, folio);
arch/arm/mm/fault-armv.c
208
if (mapping) {
arch/arm/mm/fault-armv.c
210
make_coherent(mapping, vma, addr, ptep, pfn);
arch/arm/mm/flush.c
199
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
arch/arm/mm/flush.c
234
if (mapping && cache_is_vipt_aliasing())
arch/arm/mm/flush.c
238
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
arch/arm/mm/flush.c
253
flush_dcache_mmap_lock(mapping);
arch/arm/mm/flush.c
254
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
arch/arm/mm/flush.c
281
flush_dcache_mmap_unlock(mapping);
arch/arm/mm/flush.c
289
struct address_space *mapping;
arch/arm/mm/flush.c
303
mapping = folio_flush_mapping(folio);
arch/arm/mm/flush.c
305
mapping = NULL;
arch/arm/mm/flush.c
308
__flush_dcache_folio(mapping, folio);
arch/arm/mm/flush.c
336
struct address_space *mapping;
arch/arm/mm/flush.c
351
mapping = folio_flush_mapping(folio);
arch/arm/mm/flush.c
354
mapping && !folio_mapped(folio))
arch/arm/mm/flush.c
357
__flush_dcache_folio(mapping, folio);
arch/arm/mm/flush.c
358
if (mapping && cache_is_vivt())
arch/arm/mm/flush.c
359
__flush_dcache_aliases(mapping, folio);
arch/arm/mm/flush.c
360
else if (mapping)
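Taken together, the flush_dcache_mmap_lock()/unlock() macros and the flush.c / fault-armv.c call sites above form one pattern: walk every user mapping of a folio under the lock and flush the aliases that may hold stale data. A condensed sketch assembled from those call sites (example_flush_aliases() is a hypothetical name; the per-VMA flush itself is arch-specific and omitted):

static void example_flush_aliases(struct address_space *mapping,
				  struct folio *folio)	/* hypothetical */
{
	pgoff_t pgoff = folio->index;
	struct vm_area_struct *vma;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* flush the user-space alias for this VMA here */
	}
	flush_dcache_mmap_unlock(mapping);
}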
arch/arm/mm/mm.h
48
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio);
arch/arm64/include/asm/kvm_host.h
91
struct pkvm_mapping *mapping; /* only used from EL1 */
arch/arm64/kvm/mmu.c
1148
kfree(mc->mapping);
arch/arm64/kvm/mmu.c
1157
if (!mc->mapping) {
arch/arm64/kvm/mmu.c
1158
mc->mapping = kzalloc_obj(struct pkvm_mapping,
arch/arm64/kvm/mmu.c
1160
if (!mc->mapping)
arch/arm64/kvm/pkvm.c
329
struct pkvm_mapping *mapping;
arch/arm64/kvm/pkvm.c
335
for_each_mapping_in_range_safe(pgt, start, end, mapping) {
arch/arm64/kvm/pkvm.c
336
ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn,
arch/arm64/kvm/pkvm.c
337
mapping->nr_pages);
arch/arm64/kvm/pkvm.c
340
pkvm_mapping_remove(mapping, &pgt->pkvm_mappings);
arch/arm64/kvm/pkvm.c
341
kfree(mapping);
arch/arm64/kvm/pkvm.c
364
struct pkvm_mapping *mapping = NULL;
arch/arm64/kvm/pkvm.c
380
mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, addr, addr + size - 1);
arch/arm64/kvm/pkvm.c
381
if (mapping) {
arch/arm64/kvm/pkvm.c
382
if (size == (mapping->nr_pages * PAGE_SIZE))
arch/arm64/kvm/pkvm.c
389
mapping = NULL;
arch/arm64/kvm/pkvm.c
396
swap(mapping, cache->mapping);
arch/arm64/kvm/pkvm.c
397
mapping->gfn = gfn;
arch/arm64/kvm/pkvm.c
398
mapping->pfn = pfn;
arch/arm64/kvm/pkvm.c
399
mapping->nr_pages = size / PAGE_SIZE;
arch/arm64/kvm/pkvm.c
400
pkvm_mapping_insert(mapping, &pgt->pkvm_mappings);
arch/arm64/kvm/pkvm.c
416
struct pkvm_mapping *mapping;
arch/arm64/kvm/pkvm.c
420
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
arch/arm64/kvm/pkvm.c
421
ret = kvm_call_hyp_nvhe(__pkvm_host_wrprotect_guest, handle, mapping->gfn,
arch/arm64/kvm/pkvm.c
422
mapping->nr_pages);
arch/arm64/kvm/pkvm.c
433
struct pkvm_mapping *mapping;
arch/arm64/kvm/pkvm.c
436
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
arch/arm64/kvm/pkvm.c
437
__clean_dcache_guest_page(pfn_to_kaddr(mapping->pfn),
arch/arm64/kvm/pkvm.c
438
PAGE_SIZE * mapping->nr_pages);
arch/arm64/kvm/pkvm.c
447
struct pkvm_mapping *mapping;
arch/arm64/kvm/pkvm.c
451
for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping)
arch/arm64/kvm/pkvm.c
452
young |= kvm_call_hyp_nvhe(__pkvm_host_test_clear_young_guest, handle, mapping->gfn,
arch/arm64/kvm/pkvm.c
453
mapping->nr_pages, mkold);
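The pkvm.c entries above keep guest mappings in an interval tree and walk ranges with for_each_mapping_in_range_safe(). A sketch of the deletion-safe shape such a walk takes, assuming an iter-next helper generated alongside pkvm_mapping_iter_first() (the real macro lives in arch/arm64/kvm/pkvm.c and may differ):

struct pkvm_mapping *mapping, *tmp;

mapping = pkvm_mapping_iter_first(&pgt->pkvm_mappings, start, end - 1);
while (mapping) {
	/* Fetch the successor first so the body may remove/free `mapping'. */
	tmp = pkvm_mapping_iter_next(mapping, start, end - 1);
	/* ... act on mapping->gfn / mapping->nr_pages, possibly remove it ... */
	mapping = tmp;
}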
arch/csky/abiv1/cacheflush.c
20
struct address_space *mapping;
arch/csky/abiv1/cacheflush.c
25
mapping = folio_flush_mapping(folio);
arch/csky/abiv1/cacheflush.c
27
if (mapping && !folio_mapped(folio))
arch/csky/abiv1/cacheflush.c
31
if (mapping)
arch/csky/abiv1/inc/abi/cacheflush.h
19
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
arch/csky/abiv1/inc/abi/cacheflush.h
20
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
arch/csky/abiv2/inc/abi/cacheflush.h
34
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/csky/abiv2/inc/abi/cacheflush.h
35
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/loongarch/include/asm/cacheflush.h
48
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/loongarch/include/asm/cacheflush.h
49
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/m68k/include/asm/cacheflush_mm.h
261
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/m68k/include/asm/cacheflush_mm.h
262
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/mips/include/asm/cacheflush.h
76
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/mips/include/asm/cacheflush.h
77
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/mips/include/asm/mach-rc32434/pci.h
38
u32 mapping; /* mapping. */
arch/mips/include/asm/vdso.h
35
struct vm_special_mapping mapping;
arch/mips/kernel/vdso.c
171
&image->mapping);
arch/mips/kernel/vdso.c
41
image->mapping.pages[i] = pfn_to_page(data_pfn + i);
arch/mips/mm/cache.c
105
struct address_space *mapping = folio_flush_mapping(folio);
arch/mips/mm/cache.c
109
if (mapping && !mapping_mapped(mapping)) {
arch/mips/pci/pci-rc32434.c
155
rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START);
arch/mips/pci/pci-rc32434.c
163
rc32434_pci->pcilba[1].mapping = 0x60000000;
arch/mips/pci/pci-rc32434.c
170
rc32434_pci->pcilba[2].mapping = 0x18FFFFFF;
arch/mips/pci/pci-rc32434.c
179
rc32434_pci->pcilba[3].mapping = 0x18800000;
arch/nios2/include/asm/cacheflush.h
54
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
arch/nios2/include/asm/cacheflush.h
55
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
arch/nios2/include/asm/cacheflush.h
56
#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
arch/nios2/include/asm/cacheflush.h
57
xa_lock_irqsave(&mapping->i_pages, flags)
arch/nios2/include/asm/cacheflush.h
58
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
arch/nios2/include/asm/cacheflush.h
59
xa_unlock_irqrestore(&mapping->i_pages, flags)
arch/nios2/mm/cacheflush.c
177
struct address_space *mapping;
arch/nios2/mm/cacheflush.c
186
mapping = folio_flush_mapping(folio);
arch/nios2/mm/cacheflush.c
189
if (mapping && !mapping_mapped(mapping)) {
arch/nios2/mm/cacheflush.c
193
if (mapping) {
arch/nios2/mm/cacheflush.c
195
flush_aliases(mapping, folio);
arch/nios2/mm/cacheflush.c
215
struct address_space *mapping;
arch/nios2/mm/cacheflush.c
233
mapping = folio_flush_mapping(folio);
arch/nios2/mm/cacheflush.c
234
if (mapping) {
arch/nios2/mm/cacheflush.c
235
flush_aliases(mapping, folio);
arch/nios2/mm/cacheflush.c
74
static void flush_aliases(struct address_space *mapping, struct folio *folio)
arch/nios2/mm/cacheflush.c
84
flush_dcache_mmap_lock_irqsave(mapping, flags);
arch/nios2/mm/cacheflush.c
85
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
arch/nios2/mm/cacheflush.c
96
flush_dcache_mmap_unlock_irqrestore(mapping, flags);
arch/parisc/include/asm/cacheflush.h
54
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
arch/parisc/include/asm/cacheflush.h
55
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
arch/parisc/include/asm/cacheflush.h
56
#define flush_dcache_mmap_lock_irqsave(mapping, flags) \
arch/parisc/include/asm/cacheflush.h
57
xa_lock_irqsave(&mapping->i_pages, flags)
arch/parisc/include/asm/cacheflush.h
58
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags) \
arch/parisc/include/asm/cacheflush.h
59
xa_unlock_irqrestore(&mapping->i_pages, flags)
arch/parisc/kernel/cache.c
475
struct address_space *mapping = folio_flush_mapping(folio);
arch/parisc/kernel/cache.c
483
if (mapping && !mapping_mapped(mapping)) {
arch/parisc/kernel/cache.c
493
if (!mapping)
arch/parisc/kernel/cache.c
505
flush_dcache_mmap_lock_irqsave(mapping, flags);
arch/parisc/kernel/cache.c
506
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
arch/parisc/kernel/cache.c
541
flush_dcache_mmap_unlock_irqrestore(mapping, flags);
arch/powerpc/kernel/iommu.c
917
dma_addr_t mapping;
arch/powerpc/kernel/iommu.c
951
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
arch/powerpc/kernel/iommu.c
953
if (mapping == DMA_MAPPING_ERROR) {
arch/powerpc/kernel/iommu.c
958
*dma_handle = mapping | ((u64)ret & (tcesize - 1));
arch/powerpc/kvm/book3s_xive.h
145
struct address_space *mapping;
arch/powerpc/kvm/book3s_xive_native.c
1028
xive->mapping = NULL;
arch/powerpc/kvm/book3s_xive_native.c
216
if (xive->mapping)
arch/powerpc/kvm/book3s_xive_native.c
217
unmap_mapping_range(xive->mapping,
arch/powerpc/kvm/book3s_xive_native.c
335
xive->mapping = vma->vm_file->f_mapping;
arch/powerpc/platforms/pseries/papr_scm.c
1203
struct nd_mapping_desc mapping;
arch/powerpc/platforms/pseries/papr_scm.c
1250
memset(&mapping, 0, sizeof(mapping));
arch/powerpc/platforms/pseries/papr_scm.c
1251
mapping.nvdimm = p->nvdimm;
arch/powerpc/platforms/pseries/papr_scm.c
1252
mapping.start = 0;
arch/powerpc/platforms/pseries/papr_scm.c
1253
mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
arch/powerpc/platforms/pseries/papr_scm.c
1263
ndr_desc.mapping = &mapping;
arch/s390/kernel/uv.c
308
struct address_space *mapping;
arch/s390/kernel/uv.c
342
!folio->mapping || !mapping_can_writeback(folio->mapping)) {
arch/s390/kernel/uv.c
354
mapping = folio->mapping;
arch/s390/kernel/uv.c
357
inode = igrab(mapping->host);
arch/s390/kernel/uv.c
363
filemap_write_and_wait_range(mapping, lstart, lend);
arch/s390/kernel/uv.c
364
iput(mapping->host);
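The uv.c entries above show why the igrab()/iput() pair brackets the writeback: holding an inode reference keeps the address_space alive across filemap_write_and_wait_range(). Reassembled as a sketch (lstart/lend are assumed to bound the folio's byte range; locking and error paths omitted):

mapping = folio->mapping;
inode = igrab(mapping->host);		/* may fail if the inode is being torn down */
if (inode) {
	filemap_write_and_wait_range(mapping, lstart, lend);
	iput(inode);			/* drop the reference taken above */
}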
arch/sh/include/asm/cacheflush.h
96
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/sh/include/asm/cacheflush.h
97
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/sh/kernel/cpu/sh4/sq.c
321
__ATTR(mapping, 0644, mapping_show, mapping_store);
arch/sh/mm/cache-sh4.c
114
struct address_space *mapping = folio_flush_mapping(folio);
arch/sh/mm/cache-sh4.c
116
if (mapping && !mapping_mapped(mapping))
arch/sh/mm/cache-sh7705.c
138
struct address_space *mapping = folio_flush_mapping(folio);
arch/sh/mm/cache-sh7705.c
140
if (mapping && !mapping_mapped(mapping))
arch/sparc/include/asm/cacheflush_32.h
47
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/sparc/include/asm/cacheflush_32.h
48
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/sparc/include/asm/cacheflush_64.h
74
#define flush_dcache_mmap_lock(mapping) do { } while (0)
arch/sparc/include/asm/cacheflush_64.h
75
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/sparc/mm/init_64.c
460
struct address_space *mapping;
arch/sparc/mm/init_64.c
475
mapping = folio_flush_mapping(folio);
arch/sparc/mm/init_64.c
476
if (mapping && !mapping_mapped(mapping)) {
arch/sparc/mm/tlb.c
128
struct address_space *mapping;
arch/sparc/mm/tlb.c
141
mapping = folio_flush_mapping(folio);
arch/sparc/mm/tlb.c
142
if (!mapping)
arch/x86/kernel/cpu/sgx/encl.c
953
struct address_space *mapping = encl->backing->f_mapping;
arch/x86/kernel/cpu/sgx/encl.c
954
gfp_t gfpmask = mapping_gfp_mask(mapping);
arch/x86/kernel/cpu/sgx/encl.c
956
return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
arch/x86/kernel/jailhouse.c
203
void *mapping;
arch/x86/kernel/jailhouse.c
224
mapping = early_memremap(pa_data, sizeof(header));
arch/x86/kernel/jailhouse.c
225
memcpy(&header, mapping, sizeof(header));
arch/x86/kernel/jailhouse.c
226
early_memunmap(mapping, sizeof(header));
arch/x86/kernel/jailhouse.c
244
mapping = early_memremap(pa_data, setup_data_len);
arch/x86/kernel/jailhouse.c
245
memcpy(&setup_data, mapping, setup_data_len);
arch/x86/kernel/jailhouse.c
246
early_memunmap(mapping, setup_data_len);
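The jailhouse.c entries above use early_memremap() in two passes: map only the fixed-size header to learn the payload length, unmap, then map the full object. As a sketch (setup_data_len is derived from the header in the real code; every early mapping must be unmapped with the size it was created with):

mapping = early_memremap(pa_data, sizeof(header));
memcpy(&header, mapping, sizeof(header));
early_memunmap(mapping, sizeof(header));	/* done with the header-only view */

mapping = early_memremap(pa_data, setup_data_len);
memcpy(&setup_data, mapping, setup_data_len);
early_memunmap(mapping, setup_data_len);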
arch/x86/xen/mmu_pv.c
1716
struct xen_machphys_mapping mapping;
arch/x86/xen/mmu_pv.c
1718
if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
arch/x86/xen/mmu_pv.c
1719
machine_to_phys_mapping = (unsigned long *)mapping.v_start;
arch/x86/xen/mmu_pv.c
1720
machine_to_phys_nr = mapping.max_mfn + 1;
arch/xtensa/mm/cache.c
128
struct address_space *mapping = folio_flush_mapping(folio);
arch/xtensa/mm/cache.c
136
if (mapping && !mapping_mapped(mapping)) {
arch/xtensa/mm/cache.c
155
if (!alias && !mapping)
arch/xtensa/mm/cache.c
168
if (mapping)
block/bdev.c
102
invalidate_mapping_pages(mapping, 0, -1);
block/bdev.c
1280
struct address_space *mapping = inode->i_mapping;
block/bdev.c
1285
mapping->nrpages == 0) {
block/bdev.c
85
struct address_space *mapping = bdev->bd_mapping;
block/bdev.c
87
if (mapping_empty(mapping))
block/bdev.c
91
truncate_inode_pages(mapping, 0);
block/bdev.c
97
struct address_space *mapping = bdev->bd_mapping;
block/bdev.c
99
if (mapping->nrpages) {
block/fops.c
477
static int blkdev_writepages(struct address_space *mapping,
block/fops.c
485
while ((folio = writeback_iter(mapping, wbc, folio, &err)))
block/fops.c
503
struct address_space *mapping, loff_t pos,
block/fops.c
507
return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
block/fops.c
511
struct address_space *mapping,
block/fops.c
573
static int blkdev_writepages(struct address_space *mapping,
block/fops.c
577
.inode = mapping->host,
block/partitions/core.c
715
struct address_space *mapping = state->disk->part0->bd_mapping;
block/partitions/core.c
723
folio = read_mapping_folio(mapping, n >> PAGE_SECTORS_SHIFT, NULL);
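The partition-scan entry above reads sector n through the whole-disk block device's page cache. A sketch of how the returned folio is then consumed, assuming the usual sector-to-folio arithmetic (p is a hypothetical data pointer):

folio = read_mapping_folio(mapping, n >> PAGE_SECTORS_SHIFT, NULL);
if (IS_ERR(folio))
	return NULL;
/* Index to the requested sector's bytes within the folio. */
p = folio_address(folio) + offset_in_folio(folio, n * SECTOR_SIZE);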
drivers/acpi/acpica/exregion.c
493
struct acpi_data_table_mapping *mapping;
drivers/acpi/acpica/exregion.c
498
mapping = (struct acpi_data_table_mapping *) region_context;
drivers/acpi/acpica/exregion.c
499
pointer = ACPI_CAST_PTR(char, mapping->pointer) +
drivers/acpi/acpica/exregion.c
500
(address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
drivers/acpi/nfit/core.c
2290
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
drivers/acpi/nfit/core.c
2291
struct nvdimm *nvdimm = mapping->nvdimm;
drivers/acpi/nfit/core.c
2332
struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
drivers/acpi/nfit/core.c
2333
struct nvdimm *nvdimm = mapping->nvdimm;
drivers/acpi/nfit/core.c
2342
mapping->position = i;
drivers/acpi/nfit/core.c
2562
struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
drivers/acpi/nfit/core.c
2576
mapping->nvdimm = nvdimm;
drivers/acpi/nfit/core.c
2580
mapping->start = memdev->address;
drivers/acpi/nfit/core.c
2581
mapping->size = memdev->region_size;
drivers/acpi/nfit/core.c
2663
struct nd_mapping_desc *mapping;
drivers/acpi/nfit/core.c
2675
mapping = &mappings[count++];
drivers/acpi/nfit/core.c
2676
rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
drivers/acpi/nfit/core.c
2682
ndr_desc->mapping = mappings;
drivers/atm/he.c
1568
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
drivers/atm/he.c
1607
dma_addr_t mapping;
drivers/atm/he.c
1609
tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
drivers/atm/he.c
1613
tpd->status = TPD_ADDR(mapping);
drivers/atm/he.c
1680
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
drivers/atm/he.c
1773
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
drivers/atm/he.c
1881
dma_addr_t mapping;
drivers/atm/he.c
1904
heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
drivers/atm/he.c
1907
heb->mapping = mapping;
drivers/atm/he.c
1912
new_tail->phys = mapping + offsetof(struct he_buff, data);
drivers/atm/he.c
772
dma_addr_t mapping;
drivers/atm/he.c
815
heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
drivers/atm/he.c
818
heb->mapping = mapping;
drivers/atm/he.c
825
he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
drivers/atm/he.c
889
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
drivers/atm/he.h
225
dma_addr_t mapping;
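The he.c/he.h entries above show the dma_pool idiom: dma_pool_alloc() returns the CPU pointer and writes the bus address into `mapping', which the buffer stores (heb->mapping) so the matching dma_pool_free() can later be given both. Condensed sketch with error handling abbreviated:

dma_addr_t mapping;
struct he_buff *heb;

heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
if (!heb)
	return -ENOMEM;
heb->mapping = mapping;			/* remember the bus address */
/* ... hand (mapping + offsetof(struct he_buff, data)) to the device ... */
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);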
drivers/block/rbd.c
1215
rbd_dev->mapping.size = size;
drivers/block/rbd.c
1221
rbd_dev->mapping.size = 0;
drivers/block/rbd.c
1841
rbd_dev->mapping.size);
drivers/block/rbd.c
3507
mapping_size = rbd_dev->mapping.size;
drivers/block/rbd.c
445
struct rbd_mapping mapping;
drivers/block/rbd.c
4948
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
drivers/block/rbd.c
5031
(unsigned long long)rbd_dev->mapping.size);
drivers/block/rbd.c
6815
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
drivers/block/rbd.c
6996
rbd_dev->mapping.size = header->image_size;
drivers/cxl/core/region.c
2766
struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
drivers/cxl/core/region.c
3467
kzalloc_flex(*cxlr_pmem, mapping, p->nr_targets);
drivers/cxl/core/region.c
3479
struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
drivers/cxl/cxl.h
603
struct cxl_pmem_region_mapping mapping[];
drivers/cxl/pmem.c
454
struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
drivers/cxl/pmem.c
486
ndr_desc.mapping = mappings;
drivers/dax/bus.c
1199
static DEVICE_ATTR_WO(mapping);
drivers/dax/bus.c
683
struct dax_mapping *mapping = to_dax_mapping(dev);
drivers/dax/bus.c
687
ida_free(&dev_dax->ida, mapping->id);
drivers/dax/bus.c
688
kfree(mapping);
drivers/dax/bus.c
695
struct dax_mapping *mapping = to_dax_mapping(dev);
drivers/dax/bus.c
700
dev_dax->ranges[mapping->range_id].mapping = NULL;
drivers/dax/bus.c
701
mapping->range_id = -1;
drivers/dax/bus.c
708
struct dax_mapping *mapping = to_dax_mapping(dev);
drivers/dax/bus.c
715
if (mapping->range_id < 0) {
drivers/dax/bus.c
720
return &dev_dax->ranges[mapping->range_id];
drivers/dax/bus.c
800
struct dax_mapping *mapping;
drivers/dax/bus.c
810
mapping = kzalloc_obj(*mapping);
drivers/dax/bus.c
811
if (!mapping)
drivers/dax/bus.c
813
mapping->range_id = range_id;
drivers/dax/bus.c
814
mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
drivers/dax/bus.c
815
if (mapping->id < 0) {
drivers/dax/bus.c
816
kfree(mapping);
drivers/dax/bus.c
819
dev_dax->ranges[range_id].mapping = mapping;
drivers/dax/bus.c
820
dev = &mapping->dev;
drivers/dax/bus.c
825
dev_set_name(dev, "mapping%d", mapping->id);
drivers/dax/bus.c
964
struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
drivers/dax/bus.c
971
unregister_dax_mapping, &mapping->dev);
drivers/dax/dax-private.h
64
struct dax_mapping *mapping;
drivers/dax/device.c
101
if (folio->mapping)
drivers/dax/device.c
104
folio->mapping = filp->f_mapping;
drivers/dma-buf/dma-resv.c
781
struct address_space mapping;
drivers/dma-buf/dma-resv.c
788
address_space_init_once(&mapping);
drivers/dma-buf/dma-resv.c
797
i_mmap_lock_write(&mapping);
drivers/dma-buf/dma-resv.c
798
i_mmap_unlock_write(&mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1786
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1791
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1792
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1795
*bo = mapping->bo_va->base.bo;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1796
*map = mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
86
struct amdgpu_bo_va_mapping **mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
139
p[x].mapping = adev->mman.bdev.dev_mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
247
p[x].mapping = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1089
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1102
amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1104
vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1105
vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1106
vm_entries[num_mappings].offset = mapping->offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1107
vm_entries[num_mappings].flags = mapping->flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1112
amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1114
vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1115
vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1116
vm_entries[num_mappings].offset = mapping->offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
1117
vm_entries[num_mappings].flags = mapping->flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
243
struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
244
TP_ARGS(bo_va, mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
255
__entry->start = mapping->start;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
256
__entry->last = mapping->last;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
257
__entry->offset = mapping->offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
258
__entry->flags = mapping->flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
267
struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
268
TP_ARGS(bo_va, mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
279
__entry->start = mapping->start;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
280
__entry->last = mapping->last;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
281
__entry->offset = mapping->offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
282
__entry->flags = mapping->flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
290
TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
291
TP_ARGS(mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
299
__entry->soffset = mapping->start;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
300
__entry->eoffset = mapping->last + 1;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
301
__entry->flags = mapping->flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
308
TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
309
TP_ARGS(mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
313
TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
314
TP_ARGS(mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
318
TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
319
TP_ARGS(mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1224
ttm->pages[i]->mapping = bdev->dev_mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1256
ttm->pages[i]->mapping = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2759
if (p->mapping != adev->mman.bdev.dev_mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2810
if (p->mapping != adev->mman.bdev.dev_mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1552
struct amdgpu_bo_va_mapping *mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
1556
struct amdgpu_bo_va *bo_va = mapping->bo_va;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
270
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
276
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
277
if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
304
static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
307
atomic_set(&mapping->bo_va->userq_va_mapped, 0);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
316
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
324
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
325
if (!mapping) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
331
amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
162
struct amdgpu_bo_va_mapping *mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
372
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
384
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
385
if (!mapping) {
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
391
bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
583
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
589
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
915
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
922
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
930
end = (mapping->last + 1 - mapping->start);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
933
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
648
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
665
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
696
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
708
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
716
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
722
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
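The UVD/VCE entries above rely on bo_va mapping ranges being stored in AMDGPU_GPU_PAGE_SIZE units. The implied range check and rebasing arithmetic, as a sketch:

/*
 * Locate the byte address within the mapping's [start, last] GPU-page
 * range, then rebase it to an offset usable against the backing BO.
 */
if (addr / AMDGPU_GPU_PAGE_SIZE < mapping->start ||
    addr / AMDGPU_GPU_PAGE_SIZE > mapping->last)
	return -EINVAL;			/* address outside this mapping */
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;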
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1267
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1353
list_for_each_entry(mapping, &bo_va->invalids, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1359
if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1361
if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1365
amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1368
trace_amdgpu_vm_bo_update(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1371
!uncached, &sync, mapping->start,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1372
mapping->last, update_flags,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1373
mapping->offset, vram_base, mem,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1399
list_for_each_entry(mapping, &bo_va->valids, list)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1400
trace_amdgpu_vm_bo_mapping(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1504
struct amdgpu_bo_va_mapping *mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1507
if (mapping->flags & AMDGPU_VM_PAGE_PRT)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1509
kfree(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1552
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1569
mapping = list_first_entry(&vm->freed,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1571
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1574
&sync, mapping->start, mapping->last,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1576
amdgpu_vm_free_mapping(adev, vm, mapping, f);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1779
struct amdgpu_bo_va_mapping *mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1784
mapping->bo_va = bo_va;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1785
list_add(&mapping->list, &bo_va->invalids);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1786
amdgpu_vm_it_insert(mapping, &vm->va);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1788
if (mapping->flags & AMDGPU_VM_PAGE_PRT)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1794
trace_amdgpu_vm_bo_map(bo_va, mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1850
struct amdgpu_bo_va_mapping *mapping, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1872
mapping = kmalloc_obj(*mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1873
if (!mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1876
mapping->start = saddr;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1877
mapping->last = eaddr;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1878
mapping->offset = offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1879
mapping->flags = flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1881
amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1909
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1919
mapping = kmalloc_obj(*mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1920
if (!mapping)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1925
kfree(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1932
mapping->start = saddr;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1933
mapping->last = eaddr;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1934
mapping->offset = offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1935
mapping->flags = flags;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1937
amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1960
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1967
list_for_each_entry(mapping, &bo_va->valids, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1968
if (mapping->start == saddr)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1972
if (&mapping->list == &bo_va->valids) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1975
list_for_each_entry(mapping, &bo_va->invalids, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1976
if (mapping->start == saddr)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1980
if (&mapping->list == &bo_va->invalids)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1989
r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1995
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1996
amdgpu_vm_it_remove(mapping, &vm->va);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1997
mapping->bo_va = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1998
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2001
list_add(&mapping->list, &vm->freed);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2003
amdgpu_vm_free_mapping(adev, vm, mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2157
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2162
for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2163
mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2164
if (mapping->bo_va && mapping->bo_va->base.bo) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2167
bo = mapping->bo_va->base.bo;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2173
trace_amdgpu_vm_bo_cs(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2190
struct amdgpu_bo_va_mapping *mapping, *next;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2217
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2218
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2219
amdgpu_vm_it_remove(mapping, &vm->va);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2220
mapping->bo_va = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2221
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2222
list_add(&mapping->list, &vm->freed);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2224
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2225
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2226
amdgpu_vm_it_remove(mapping, &vm->va);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2227
amdgpu_vm_free_mapping(adev, vm, mapping,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2774
struct amdgpu_bo_va_mapping *mapping, *tmp;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2797
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2798
if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2803
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2804
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2817
rbtree_postorder_for_each_entry_safe(mapping, tmp,
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2822
list_del(&mapping->list);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2823
kfree(mapping);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
691
#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
692
list_for_each_entry(mapping, &(bo_va)->valids, list)
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
693
#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
694
list_for_each_entry(mapping, &(bo_va)->invalids, list)
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2058
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2068
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2069
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
2072
bo = mapping->bo_va->base.bo;
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
199
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
206
mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
207
if (!mapping)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
210
if (user_addr != mapping->start ||
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
211
(size != 0 && user_addr + size - 1 != mapping->last)) {
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
213
expected_size, mapping->start << AMDGPU_GPU_PAGE_SHIFT,
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
214
(mapping->last - mapping->start + 1) << AMDGPU_GPU_PAGE_SHIFT);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
218
*pbo = amdgpu_bo_ref(mapping->bo_va->base.bo);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
219
mapping->bo_va->queue_refcount++;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2856
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2877
mapping = container_of((struct rb_node *)node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2879
bo = mapping->bo_va->base.bo;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3405
struct amdgpu_bo_va_mapping *mapping;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3425
mapping = container_of((struct rb_node *)node,
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3428
*bo_s = mapping->start;
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3429
*bo_l = mapping->last;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
100
if (mapping->disp_cfg_to_stream_id_valid[i] && mapping->disp_cfg_to_stream_id[i] == stream_id)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1015
struct dml2_dml_to_dc_pipe_mapping *mapping,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1021
populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1023
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1035
bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1052
ctx, state, disp_cfg, mapping, existing_state);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1079
stream_disp_cfg_index = find_disp_cfg_idx_by_stream_id(mapping, stream_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1121
plane_disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
1154
if (!validate_pipe_assignment(ctx, state, disp_cfg, mapping))
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
179
static bool validate_pipe_assignment(const struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, const struct dml2_dml_to_dc_pipe_mapping *mapping)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
794
struct dml2_dml_to_dc_pipe_mapping *mapping,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
806
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
82
static int find_disp_cfg_idx_by_plane_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int plane_id)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
834
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
852
struct dml2_dml_to_dc_pipe_mapping *mapping,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
859
mapping, stream->stream_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
87
if (mapping->disp_cfg_to_plane_id_valid[i] && mapping->disp_cfg_to_plane_id[i] == plane_id)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
881
mapping, main_stream->stream_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
884
mapping, stream->stream_id);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
923
struct dml2_dml_to_dc_pipe_mapping *mapping,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
935
get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1;
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
941
struct dml2_dml_to_dc_pipe_mapping *mapping,
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
95
static int find_disp_cfg_idx_by_stream_id(struct dml2_dml_to_dc_pipe_mapping *mapping, unsigned int stream_id)
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
950
ctx, state, disp_cfg, mapping, state->streams[i]);
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
50
bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state);
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
247
} mapping;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
419
const struct cmn2asic_msg_mapping *mapping;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
437
mapping = &ctl->message_map[args->msg];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
438
if (!mapping->valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
441
msg_flags = mapping->flags;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
442
index = mapping->map_to;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
599
struct cmn2asic_mapping mapping;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
622
mapping = smu->clock_map[index];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
623
if (!mapping.valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
626
return mapping.map_to;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
633
mapping = smu->feature_map[index];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
634
if (!mapping.valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
637
return mapping.map_to;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
644
mapping = smu->table_map[index];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
645
if (!mapping.valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
648
return mapping.map_to;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
655
mapping = smu->pwr_src_map[index];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
656
if (!mapping.valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
659
return mapping.map_to;
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
666
mapping = smu->workload_map[index];
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
667
if (!mapping.valid_mapping)
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
670
return mapping.map_to;
drivers/gpu/drm/armada/armada_gem.c
220
struct address_space *mapping;
drivers/gpu/drm/armada/armada_gem.c
235
mapping = obj->obj.filp->f_mapping;
drivers/gpu/drm/armada/armada_gem.c
236
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
drivers/gpu/drm/armada/armada_gem.c
401
struct address_space *mapping;
drivers/gpu/drm/armada/armada_gem.c
408
mapping = dobj->obj.filp->f_mapping;
drivers/gpu/drm/armada/armada_gem.c
413
page = shmem_read_mapping_page(mapping, i);
drivers/gpu/drm/drm_gem.c
667
struct address_space *mapping;
drivers/gpu/drm/drm_gem.c
677
mapping = obj->filp->f_mapping;
drivers/gpu/drm/drm_gem.c
691
mapping_set_unevictable(mapping);
drivers/gpu/drm/drm_gem.c
696
folio = shmem_read_folio_gfp(mapping, i,
drivers/gpu/drm/drm_gem.c
697
mapping_gfp_mask(mapping));
drivers/gpu/drm/drm_gem.c
709
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
drivers/gpu/drm/drm_gem.c
716
mapping_clear_unevictable(mapping);
drivers/gpu/drm/drm_gem.c
744
struct address_space *mapping;
drivers/gpu/drm/drm_gem.c
747
mapping = file_inode(obj->filp)->i_mapping;
drivers/gpu/drm/drm_gem.c
748
mapping_clear_unevictable(mapping);
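The drm_gem.c entries above pin shmem-backed GEM pages: mark the mapping unevictable, populate via shmem_read_folio_gfp(), and clear the flag again on the put path. An abbreviated sketch (the real loop also fans each folio out into the pages[] array, advances by folio size, and unwinds on error):

mapping = obj->filp->f_mapping;
mapping_set_unevictable(mapping);	/* keep GEM pages off the LRU */
for (i = 0; i < npages; i++) {
	folio = shmem_read_folio_gfp(mapping, i, mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		goto fail;		/* unwind + mapping_clear_unevictable() */
	/* ... record the folio's pages ... */
}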
drivers/gpu/drm/drm_of.c
478
const char *mapping;
drivers/gpu/drm/drm_of.c
481
ret = of_property_read_string(port, "data-mapping", &mapping);
drivers/gpu/drm/drm_of.c
485
if (!strcmp(mapping, "jeida-18"))
drivers/gpu/drm/drm_of.c
487
if (!strcmp(mapping, "jeida-24"))
drivers/gpu/drm/drm_of.c
489
if (!strcmp(mapping, "jeida-30"))
drivers/gpu/drm/drm_of.c
491
if (!strcmp(mapping, "vesa-24"))
drivers/gpu/drm/drm_of.c
493
if (!strcmp(mapping, "vesa-30"))
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
136
struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
138
return mapping->iova + buf->suballoc_offset;
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
63
struct etnaviv_vram_mapping *mapping,
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
66
return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
71
struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
73
etnaviv_iommu_put_suballoc_va(context, mapping);
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
32
struct etnaviv_vram_mapping *mapping,
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
35
struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
43
struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
206
vram = submit->bos[i].mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
219
struct etnaviv_vram_mapping *mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
221
list_for_each_entry(mapping, &obj->vram_list, obj_node) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
222
if (mapping->context == context)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
223
return mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
229
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
231
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
234
WARN_ON(mapping->use == 0);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
235
mapping->use -= 1;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
246
struct etnaviv_vram_mapping *mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
251
mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
252
if (mapping) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
259
if (mapping->use == 0) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
261
if (mapping->context == mmu_context)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
262
if (va && mapping->iova != va) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
263
etnaviv_iommu_reap_mapping(mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
264
mapping = NULL;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
266
mapping->use += 1;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
269
mapping = NULL;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
271
if (mapping)
drivers/gpu/drm/etnaviv/etnaviv_gem.c
274
mapping->use += 1;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
289
mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
290
if (!mapping) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
291
mapping = kzalloc_obj(*mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
292
if (!mapping) {
drivers/gpu/drm/etnaviv/etnaviv_gem.c
297
INIT_LIST_HEAD(&mapping->scan_node);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
298
mapping->object = etnaviv_obj;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
300
list_del(&mapping->obj_node);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
303
mapping->use = 1;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
307
mapping, va);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
309
kfree(mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
311
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
321
return mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
505
struct etnaviv_vram_mapping *mapping, *tmp;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
514
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
drivers/gpu/drm/etnaviv/etnaviv_gem.c
516
struct etnaviv_iommu_context *context = mapping->context;
drivers/gpu/drm/etnaviv/etnaviv_gem.c
518
WARN_ON(mapping->use);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
521
etnaviv_iommu_unmap_gem(context, mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
523
list_del(&mapping->obj_node);
drivers/gpu/drm/etnaviv/etnaviv_gem.c
524
kfree(mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.h
125
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem.h
85
struct etnaviv_vram_mapping *mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
218
struct etnaviv_vram_mapping *mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
220
mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
223
if (IS_ERR(mapping)) {
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
224
ret = PTR_ERR(mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
229
submit->bos[i].va != mapping->iova) {
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
230
etnaviv_gem_mapping_unreference(mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
237
submit->bos[i].mapping = mapping;
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
304
ptr[off] = bo->mapping->iova + r->reloc_offset;
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
378
etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
380
submit->bos[i].mapping = NULL;
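The submit entries above pair every etnaviv_gem_mapping_get() with an etnaviv_gem_mapping_unreference() once the command stream no longer needs the IOVA. Usage shape as a sketch (the get's argument list is abbreviated to the parts visible in this listing):

mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base, mmu_context, va);
if (IS_ERR(mapping))
	return PTR_ERR(mapping);
/* ... patch relocs with mapping->iova + reloc_offset ... */
etnaviv_gem_mapping_unreference(mapping);	/* drops mapping->use */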
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
115
struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
117
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
121
etnaviv_iommu_unmap(context, mapping->vram_node.start,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
123
drm_mm_remove_node(&mapping->vram_node);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
126
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
128
struct etnaviv_iommu_context *context = mapping->context;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
131
WARN_ON(mapping->use);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
133
etnaviv_iommu_remove_mapping(context, mapping);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
134
etnaviv_iommu_context_put(mapping->context);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
135
mapping->context = NULL;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
136
list_del_init(&mapping->mmu_node);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
268
struct etnaviv_vram_mapping *mapping, u64 va)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
285
mapping->iova = iova;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
286
mapping->context = etnaviv_iommu_context_get(context);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
287
list_add_tail(&mapping->mmu_node, &context->mappings);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
293
node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
302
mapping->iova = node->start;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
311
mapping->context = etnaviv_iommu_context_get(context);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
312
list_add_tail(&mapping->mmu_node, &context->mappings);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
320
struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
322
WARN_ON(mapping->use);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
327
if (!mapping->context) {
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
333
if (mapping->vram_node.mm == &context->mm)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
334
etnaviv_iommu_remove_mapping(context, mapping);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
336
list_del(&mapping->mmu_node);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
398
struct etnaviv_vram_mapping *mapping,
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
404
if (mapping->use > 0) {
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
405
mapping->use++;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
417
mapping->iova = paddr - memory_base;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
419
struct drm_mm_node *node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
428
mapping->iova = node->start;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
440
list_add_tail(&mapping->mmu_node, &context->mappings);
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
441
mapping->use = 1;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
449
struct etnaviv_vram_mapping *mapping)
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
451
struct drm_mm_node *node = &mapping->vram_node;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
454
mapping->use--;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
456
if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
100
struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
90
struct etnaviv_vram_mapping *mapping, u64 va);
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
92
struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
93
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping);
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
96
struct etnaviv_vram_mapping *mapping,
drivers/gpu/drm/exynos/exynos_drm_dma.c
109
if (!priv->mapping) {
drivers/gpu/drm/exynos/exynos_drm_dma.c
110
void *mapping = NULL;
drivers/gpu/drm/exynos/exynos_drm_dma.c
113
mapping = arm_iommu_create_mapping(dev,
drivers/gpu/drm/exynos/exynos_drm_dma.c
116
mapping = iommu_get_domain_for_dev(priv->dma_dev);
drivers/gpu/drm/exynos/exynos_drm_dma.c
118
if (!mapping)
drivers/gpu/drm/exynos/exynos_drm_dma.c
120
priv->mapping = mapping;
drivers/gpu/drm/exynos/exynos_drm_dma.c
140
arm_iommu_release_mapping(priv->mapping);
drivers/gpu/drm/exynos/exynos_drm_dma.c
141
priv->mapping = NULL;
drivers/gpu/drm/exynos/exynos_drm_dma.c
66
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
drivers/gpu/drm/exynos/exynos_drm_dma.c
68
ret = iommu_attach_device(priv->mapping, subdrv_dev);
drivers/gpu/drm/exynos/exynos_drm_dma.c
92
iommu_detach_device(priv->mapping, subdrv_dev);
drivers/gpu/drm/exynos/exynos_drm_drv.h
203
void *mapping;
drivers/gpu/drm/exynos/exynos_drm_drv.h
222
return priv->mapping ? true : false;
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1856
struct sdvo_device_mapping *mapping;
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1859
mapping = &(dev_priv->sdvo_mappings[0]);
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1861
mapping = &(dev_priv->sdvo_mappings[1]);
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1863
if (mapping->initialized)
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1864
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1873
struct sdvo_device_mapping *mapping;
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1877
mapping = &dev_priv->sdvo_mappings[0];
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1879
mapping = &dev_priv->sdvo_mappings[1];
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1883
if (mapping->initialized) {
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1884
pin = mapping->i2c_pin;
drivers/gpu/drm/gma500/psb_intel_sdvo.c
1885
speed = mapping->i2c_speed;
drivers/gpu/drm/i915/display/intel_bios.c
1235
struct sdvo_device_mapping *mapping;
drivers/gpu/drm/i915/display/intel_bios.c
1258
mapping = &display->vbt.sdvo_mappings[child->dvo_port - 1];
drivers/gpu/drm/i915/display/intel_bios.c
1259
if (!mapping->initialized) {
drivers/gpu/drm/i915/display/intel_bios.c
1260
mapping->dvo_port = child->dvo_port;
drivers/gpu/drm/i915/display/intel_bios.c
1261
mapping->target_addr = child->target_addr;
drivers/gpu/drm/i915/display/intel_bios.c
1262
mapping->dvo_wiring = child->dvo_wiring;
drivers/gpu/drm/i915/display/intel_bios.c
1263
mapping->ddc_pin = child->ddc_pin;
drivers/gpu/drm/i915/display/intel_bios.c
1264
mapping->i2c_pin = child->i2c_pin;
drivers/gpu/drm/i915/display/intel_bios.c
1265
mapping->initialized = 1;
drivers/gpu/drm/i915/display/intel_bios.c
1268
mapping->dvo_port, mapping->target_addr,
drivers/gpu/drm/i915/display/intel_bios.c
1269
mapping->dvo_wiring, mapping->ddc_pin,
drivers/gpu/drm/i915/display/intel_bios.c
1270
mapping->i2c_pin);
drivers/gpu/drm/i915/display/intel_bios.c
2183
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
drivers/gpu/drm/i915/display/intel_bios.c
2185
if (val >= ARRAY_SIZE(mapping)) {
drivers/gpu/drm/i915/display/intel_bios.c
2190
return mapping[val];
drivers/gpu/drm/i915/display/intel_sdvo.c
2619
const struct sdvo_device_mapping *mapping;
drivers/gpu/drm/i915/display/intel_sdvo.c
2623
mapping = &display->vbt.sdvo_mappings[0];
drivers/gpu/drm/i915/display/intel_sdvo.c
2625
mapping = &display->vbt.sdvo_mappings[1];
drivers/gpu/drm/i915/display/intel_sdvo.c
2627
if (mapping->initialized)
drivers/gpu/drm/i915/display/intel_sdvo.c
2628
ddc_bus = (mapping->ddc_pin & 0xf0) >> 4;
drivers/gpu/drm/i915/display/intel_sdvo.c
2642
const struct sdvo_device_mapping *mapping;
drivers/gpu/drm/i915/display/intel_sdvo.c
2646
mapping = &display->vbt.sdvo_mappings[0];
drivers/gpu/drm/i915/display/intel_sdvo.c
2648
mapping = &display->vbt.sdvo_mappings[1];
drivers/gpu/drm/i915/display/intel_sdvo.c
2650
if (mapping->initialized &&
drivers/gpu/drm/i915/display/intel_sdvo.c
2651
intel_gmbus_is_valid_pin(display, mapping->i2c_pin))
drivers/gpu/drm/i915/display/intel_sdvo.c
2652
pin = mapping->i2c_pin;
drivers/gpu/drm/i915/gem/i915_gem_object.h
849
struct address_space *mapping,
drivers/gpu/drm/i915/gem/i915_gem_object.h
851
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
drivers/gpu/drm/i915/gem/i915_gem_object.h
853
void __shmem_writeback(size_t size, struct address_space *mapping);
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
667
void *mapping;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
230
if (obj->mm.mapping) {
drivers/gpu/drm/i915/gem/i915_gem_pages.c
231
unmap_object(obj, page_mask_bits(obj->mm.mapping));
drivers/gpu/drm/i915/gem/i915_gem_pages.c
232
obj->mm.mapping = NULL;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
457
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
551
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
560
ptr = obj->mm.mapping = NULL;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
579
obj->mm.mapping = page_pack_bits(ptr, type);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
618
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
631
GEM_BUG_ON(!obj->mm.mapping);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
639
unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
drivers/gpu/drm/i915/gem/i915_gem_phys.c
107
struct address_space *mapping = obj->base.filp->f_mapping;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
114
page = shmem_read_mapping_page(mapping, i);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
23
struct address_space *mapping = obj->base.filp->f_mapping;
drivers/gpu/drm/i915/gem/i915_gem_phys.c
241
if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
drivers/gpu/drm/i915/gem/i915_gem_phys.c
69
page = shmem_read_mapping_page(mapping, i);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
114
folio = shmem_read_folio_gfp(mapping, i, gfp);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
136
gfp = mapping_gfp_mask(mapping);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
192
shmem_sg_free_table(st, mapping, false, false);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
194
mapping_clear_unevictable(mapping);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
217
struct address_space *mapping = obj->base.filp->f_mapping;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
235
ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping,
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
248
shmem_sg_free_table(st, mapping, false, false);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
272
shmem_sg_free_table(st, mapping, false, false);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
307
void __shmem_writeback(size_t size, struct address_space *mapping)
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
324
while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
34
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
42
mapping_clear_unevictable(mapping);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
557
struct address_space *mapping;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
573
mapping = obj->base.filp->f_mapping;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
574
mapping_set_gfp_mask(mapping, mask);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
575
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
67
struct address_space *mapping,
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
97
mapping_set_unevictable(mapping);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
98
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
203
!is_vmalloc_addr(obj->mm.mapping))
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
200
struct address_space *mapping;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
210
mapping = filp->f_mapping;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
211
mapping_set_gfp_mask(mapping, mask);
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
212
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
drivers/gpu/drm/i915/gt/intel_gtt.c
322
return page_unpack_bits(p->mm.mapping, &type);
drivers/gpu/drm/i915/gt/selftest_timeline.c
837
page_unmask_bits(tl->hwsp_ggtt->obj->mm.mapping));
drivers/gpu/drm/i915/gt/selftest_tlb.c
184
cs = page_mask_bits(batch->mm.mapping);
drivers/gpu/drm/i915/gt/selftest_tlb.c
27
memset64(page_mask_bits(vma->obj->mm.mapping) +
drivers/gpu/drm/i915/i915_cmd_parser.c
1551
cmd = page_mask_bits(shadow->obj->mm.mapping);
drivers/gpu/drm/i915/i915_debugfs.c
98
return obj->mm.mapping ? 'M' : ' ';
drivers/gpu/drm/i915/i915_gem.c
279
gtt_user_read(struct io_mapping *mapping,
drivers/gpu/drm/i915/i915_gem.c
287
vaddr = io_mapping_map_atomic_wc(mapping, base);
drivers/gpu/drm/i915/i915_gem.c
293
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/i915/i915_gem.c
514
ggtt_write(struct io_mapping *mapping,
drivers/gpu/drm/i915/i915_gem.c
522
vaddr = io_mapping_map_atomic_wc(mapping, base);
drivers/gpu/drm/i915/i915_gem.c
527
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
drivers/gpu/drm/imagination/pvr_device_info.c
53
u32 bitmask_size, const uintptr_t *mapping, u32 mapping_max)
drivers/gpu/drm/imagination/pvr_device_info.c
60
if (mapping == quirks_mapping)
drivers/gpu/drm/imagination/pvr_device_info.c
69
if (mapping == quirks_mapping)
drivers/gpu/drm/imagination/pvr_device_info.c
80
*(bool *)((u8 *)pvr_dev + mapping[i]) = true;
drivers/gpu/drm/imx/ipuv3/imx-ldb.c
418
const char * const mapping;
drivers/gpu/drm/imx/ipuv3/imx-ldb.c
440
if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) &&
drivers/gpu/drm/lima/lima_gem.c
24
struct address_space *mapping = bo->base.base.filp->f_mapping;
drivers/gpu/drm/lima/lima_gem.c
52
mapping_set_unevictable(mapping);
drivers/gpu/drm/lima/lima_gem.c
56
struct page *page = shmem_read_mapping_page(mapping, i);
drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
387
static void mtk_hdmi_v2_hw_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 chnum, u8 mapping)
drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
401
if (mapping == 0x0e) {
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
983
uint32_t mapping)
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
987
else if (!mapping)
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
990
drm_printf(p, "%d ", mapping);
drivers/gpu/drm/nouveau/nouveau_dmem.c
285
tail->mapping = head->mapping;
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
109
if (dev->archdata.mapping) {
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
110
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
113
arm_iommu_release_mapping(mapping);
drivers/gpu/drm/omapdrm/omap_gem.c
1287
struct address_space *mapping;
drivers/gpu/drm/omapdrm/omap_gem.c
1357
mapping = obj->filp->f_mapping;
drivers/gpu/drm/omapdrm/omap_gem.c
1358
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
drivers/gpu/drm/panfrost/panfrost_drv.c
139
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_drv.c
158
mapping = panfrost_gem_mapping_get(bo, priv);
drivers/gpu/drm/panfrost/panfrost_drv.c
159
if (mapping) {
drivers/gpu/drm/panfrost/panfrost_drv.c
160
args->offset = mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_drv.c
161
panfrost_gem_mapping_put(mapping);
drivers/gpu/drm/panfrost/panfrost_drv.c
216
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_drv.c
219
mapping = panfrost_gem_mapping_get(bo, priv);
drivers/gpu/drm/panfrost/panfrost_drv.c
220
if (!mapping) {
drivers/gpu/drm/panfrost/panfrost_drv.c
226
job->mappings[i] = mapping;
drivers/gpu/drm/panfrost/panfrost_drv.c
437
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_drv.c
448
mapping = panfrost_gem_mapping_get(bo, priv);
drivers/gpu/drm/panfrost/panfrost_drv.c
451
if (!mapping)
drivers/gpu/drm/panfrost/panfrost_drv.c
454
args->offset = mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_drv.c
455
panfrost_gem_mapping_put(mapping);
drivers/gpu/drm/panfrost/panfrost_dump.c
198
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_dump.c
204
mapping = job->mappings[i];
drivers/gpu/drm/panfrost/panfrost_dump.c
219
WARN_ON(!mapping->active);
drivers/gpu/drm/panfrost/panfrost_dump.c
226
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_gem.c
113
struct panfrost_gem_mapping *iter, *mapping = NULL;
drivers/gpu/drm/panfrost/panfrost_gem.c
119
mapping = iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
125
return mapping;
drivers/gpu/drm/panfrost/panfrost_gem.c
129
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
drivers/gpu/drm/panfrost/panfrost_gem.c
131
if (mapping->active)
drivers/gpu/drm/panfrost/panfrost_gem.c
132
panfrost_mmu_unmap(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
134
spin_lock(&mapping->mmu->mm_lock);
drivers/gpu/drm/panfrost/panfrost_gem.c
135
if (drm_mm_node_allocated(&mapping->mmnode))
drivers/gpu/drm/panfrost/panfrost_gem.c
136
drm_mm_remove_node(&mapping->mmnode);
drivers/gpu/drm/panfrost/panfrost_gem.c
137
spin_unlock(&mapping->mmu->mm_lock);
drivers/gpu/drm/panfrost/panfrost_gem.c
142
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_gem.c
144
mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
drivers/gpu/drm/panfrost/panfrost_gem.c
146
panfrost_gem_teardown_mapping(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
147
drm_gem_object_put(&mapping->obj->base.base);
drivers/gpu/drm/panfrost/panfrost_gem.c
148
panfrost_mmu_ctx_put(mapping->mmu);
drivers/gpu/drm/panfrost/panfrost_gem.c
149
kfree(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
152
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
drivers/gpu/drm/panfrost/panfrost_gem.c
154
if (!mapping)
drivers/gpu/drm/panfrost/panfrost_gem.c
157
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
drivers/gpu/drm/panfrost/panfrost_gem.c
162
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_gem.c
164
list_for_each_entry(mapping, &bo->mappings.list, node)
drivers/gpu/drm/panfrost/panfrost_gem.c
165
panfrost_gem_teardown_mapping(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
176
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_gem.c
178
mapping = kzalloc_obj(*mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
179
if (!mapping)
drivers/gpu/drm/panfrost/panfrost_gem.c
182
INIT_LIST_HEAD(&mapping->node);
drivers/gpu/drm/panfrost/panfrost_gem.c
183
kref_init(&mapping->refcount);
drivers/gpu/drm/panfrost/panfrost_gem.c
185
mapping->obj = bo;
drivers/gpu/drm/panfrost/panfrost_gem.c
198
mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
drivers/gpu/drm/panfrost/panfrost_gem.c
199
spin_lock(&mapping->mmu->mm_lock);
drivers/gpu/drm/panfrost/panfrost_gem.c
200
ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
drivers/gpu/drm/panfrost/panfrost_gem.c
202
spin_unlock(&mapping->mmu->mm_lock);
drivers/gpu/drm/panfrost/panfrost_gem.c
207
ret = panfrost_mmu_map(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
214
list_add_tail(&mapping->node, &bo->mappings.list);
drivers/gpu/drm/panfrost/panfrost_gem.c
219
panfrost_gem_mapping_put(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.c
227
struct panfrost_gem_mapping *mapping = NULL, *iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
232
mapping = iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
239
panfrost_gem_mapping_put(mapping);
drivers/gpu/drm/panfrost/panfrost_gem.h
154
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
drivers/gpu/drm/panfrost/panfrost_mmu.c
461
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
drivers/gpu/drm/panfrost/panfrost_mmu.c
463
struct panfrost_gem_object *bo = mapping->obj;
drivers/gpu/drm/panfrost/panfrost_mmu.c
471
if (WARN_ON(mapping->active))
drivers/gpu/drm/panfrost/panfrost_mmu.c
481
ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
drivers/gpu/drm/panfrost/panfrost_mmu.c
486
mapping->active = true;
drivers/gpu/drm/panfrost/panfrost_mmu.c
495
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
drivers/gpu/drm/panfrost/panfrost_mmu.c
497
struct panfrost_gem_object *bo = mapping->obj;
drivers/gpu/drm/panfrost/panfrost_mmu.c
500
struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
drivers/gpu/drm/panfrost/panfrost_mmu.c
501
u64 iova = mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_mmu.c
502
size_t len = mapping->mmnode.size << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_mmu.c
505
if (WARN_ON(!mapping->active))
drivers/gpu/drm/panfrost/panfrost_mmu.c
509
mapping->mmu->as, iova, len);
drivers/gpu/drm/panfrost/panfrost_mmu.c
525
panfrost_mmu_flush_range(pfdev, mapping->mmu,
drivers/gpu/drm/panfrost/panfrost_mmu.c
526
mapping->mmnode.start << PAGE_SHIFT, len);
drivers/gpu/drm/panfrost/panfrost_mmu.c
527
mapping->active = false;
drivers/gpu/drm/panfrost/panfrost_mmu.c
553
struct panfrost_gem_mapping *mapping = NULL;
drivers/gpu/drm/panfrost/panfrost_mmu.c
572
mapping = drm_mm_node_to_panfrost_mapping(node);
drivers/gpu/drm/panfrost/panfrost_mmu.c
574
kref_get(&mapping->refcount);
drivers/gpu/drm/panfrost/panfrost_mmu.c
582
return mapping;
drivers/gpu/drm/panfrost/panfrost_mmu.c
593
struct address_space *mapping;
drivers/gpu/drm/panfrost/panfrost_mmu.c
651
mapping = bo->base.base.filp->f_mapping;
drivers/gpu/drm/panfrost/panfrost_mmu.c
652
mapping_set_unevictable(mapping);
drivers/gpu/drm/panfrost/panfrost_mmu.c
658
folio = shmem_read_folio(mapping, pg);
drivers/gpu/drm/panfrost/panfrost_mmu.h
12
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
drivers/gpu/drm/panfrost/panfrost_mmu.h
13
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
102
perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
104
if (!perfcnt->mapping) {
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
133
ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
176
panfrost_gem_mapping_put(perfcnt->mapping);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
204
drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
206
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
207
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
208
panfrost_gem_mapping_put(perfcnt->mapping);
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
209
perfcnt->mapping = NULL;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
30
struct panfrost_gem_mapping *mapping;
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
54
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
63
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
65
if (mapping) {
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
67
arm_iommu_release_mapping(mapping);
drivers/gpu/drm/tegra/drm.c
951
if (client->dev->archdata.mapping) {
drivers/gpu/drm/tegra/drm.c
952
struct dma_iommu_mapping *mapping =
drivers/gpu/drm/tegra/drm.c
955
arm_iommu_release_mapping(mapping);
drivers/gpu/drm/tegra/firewall.c
32
struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
drivers/gpu/drm/tegra/gem.c
512
struct host1x_bo_mapping *mapping, *tmp;
drivers/gpu/drm/tegra/gem.c
516
list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
drivers/gpu/drm/tegra/gem.c
517
if (mapping->cache)
drivers/gpu/drm/tegra/gem.c
518
host1x_bo_unpin(mapping);
drivers/gpu/drm/tegra/gem.c
520
dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
drivers/gpu/drm/tegra/gem.c
521
dev_name(mapping->dev));
drivers/gpu/drm/tegra/submit.c
148
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/submit.c
152
mapping = xa_load(&context->mappings, id);
drivers/gpu/drm/tegra/submit.c
153
if (mapping)
drivers/gpu/drm/tegra/submit.c
154
kref_get(&mapping->ref);
drivers/gpu/drm/tegra/submit.c
158
return mapping;
drivers/gpu/drm/tegra/submit.c
229
struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
drivers/gpu/drm/tegra/submit.c
232
dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
drivers/gpu/drm/tegra/submit.c
282
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/submit.c
290
mapping = tegra_drm_mapping_get(context, buf->mapping);
drivers/gpu/drm/tegra/submit.c
291
if (!mapping) {
drivers/gpu/drm/tegra/submit.c
292
SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
drivers/gpu/drm/tegra/submit.c
297
err = submit_write_reloc(context, bo, buf, mapping);
drivers/gpu/drm/tegra/submit.c
299
tegra_drm_mapping_put(mapping);
drivers/gpu/drm/tegra/submit.c
303
mappings[i].mapping = mapping;
drivers/gpu/drm/tegra/submit.c
316
tegra_drm_mapping_put(mappings[i].mapping);
drivers/gpu/drm/tegra/submit.c
500
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
drivers/gpu/drm/tegra/submit.c
670
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
drivers/gpu/drm/tegra/submit.h
8
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/uapi.c
17
struct tegra_drm_mapping *mapping =
drivers/gpu/drm/tegra/uapi.c
192
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/uapi.c
20
host1x_bo_unpin(mapping->map);
drivers/gpu/drm/tegra/uapi.c
209
mapping = kzalloc_obj(*mapping);
drivers/gpu/drm/tegra/uapi.c
21
host1x_bo_put(mapping->bo);
drivers/gpu/drm/tegra/uapi.c
210
if (!mapping) {
drivers/gpu/drm/tegra/uapi.c
215
kref_init(&mapping->ref);
drivers/gpu/drm/tegra/uapi.c
222
mapping->bo = tegra_gem_lookup(file, args->handle);
drivers/gpu/drm/tegra/uapi.c
223
if (!mapping->bo) {
drivers/gpu/drm/tegra/uapi.c
23
kfree(mapping);
drivers/gpu/drm/tegra/uapi.c
246
mapping->map = host1x_bo_pin(mapping_dev, mapping->bo, direction, NULL);
drivers/gpu/drm/tegra/uapi.c
247
if (IS_ERR(mapping->map)) {
drivers/gpu/drm/tegra/uapi.c
248
err = PTR_ERR(mapping->map);
drivers/gpu/drm/tegra/uapi.c
252
mapping->iova = mapping->map->phys;
drivers/gpu/drm/tegra/uapi.c
253
mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
drivers/gpu/drm/tegra/uapi.c
255
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
drivers/gpu/drm/tegra/uapi.c
26
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping)
drivers/gpu/drm/tegra/uapi.c
265
host1x_bo_unpin(mapping->map);
drivers/gpu/drm/tegra/uapi.c
267
host1x_bo_put(mapping->bo);
drivers/gpu/drm/tegra/uapi.c
269
kfree(mapping);
drivers/gpu/drm/tegra/uapi.c
279
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/uapi.c
28
kref_put(&mapping->ref, tegra_drm_mapping_release);
drivers/gpu/drm/tegra/uapi.c
290
mapping = xa_erase(&context->mappings, args->mapping);
drivers/gpu/drm/tegra/uapi.c
294
if (!mapping)
drivers/gpu/drm/tegra/uapi.c
297
tegra_drm_mapping_put(mapping);
drivers/gpu/drm/tegra/uapi.c
33
struct tegra_drm_mapping *mapping;
drivers/gpu/drm/tegra/uapi.c
39
xa_for_each(&context->mappings, id, mapping)
drivers/gpu/drm/tegra/uapi.c
40
tegra_drm_mapping_put(mapping);
drivers/gpu/drm/tegra/uapi.h
55
void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping);
drivers/gpu/drm/ttm/ttm_backup.c
100
struct address_space *mapping = backup->f_mapping;
drivers/gpu/drm/ttm/ttm_backup.c
105
to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
drivers/gpu/drm/ttm/ttm_backup.c
54
struct address_space *mapping = backup->f_mapping;
drivers/gpu/drm/ttm/ttm_backup.c
58
from_folio = shmem_read_folio(mapping, idx);
drivers/gpu/drm/ttm/ttm_device.c
206
struct device *dev, struct address_space *mapping,
drivers/gpu/drm/ttm/ttm_device.c
242
bdev->dev_mapping = mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
110
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
131
clean_record_shared_mapping_range(mapping, offset, end, offset,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
175
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
180
wp_shared_mapping_range(mapping, start + offset, end - start);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
181
clean_record_shared_mapping_range(mapping, start + offset,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
199
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
202
unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
242
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
248
wp_shared_mapping_range(mapping, offset, num_pages);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
249
clean_record_shared_mapping_range(mapping, offset, num_pages,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
72
struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
76
(mapping,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
88
wp_shared_mapping_range(mapping,
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
90
clean_record_shared_mapping_range(mapping,
drivers/gpu/drm/xe/display/xe_panic.c
64
vram->mapping + panic->res.start);
drivers/gpu/drm/xe/xe_bo.c
1669
iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
drivers/gpu/drm/xe/xe_bo.c
647
if (vram->mapping &&
drivers/gpu/drm/xe/xe_bo.c
649
mem->bus.addr = (u8 __force *)vram->mapping +
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
258
mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
293
if (mem->placement & TTM_PL_FLAG_CONTIGUOUS && mgr->mapping)
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
294
mem->bus.addr = (u8 __force *)mgr->mapping + mem->bus.offset;
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
37
void __iomem *mapping;
drivers/gpu/drm/xe/xe_vram.c
182
xe->mem.vram->mapping = NULL;
drivers/gpu/drm/xe/xe_vram.c
185
tile->mem.vram->mapping = NULL;
drivers/gpu/drm/xe/xe_vram.c
187
tile->mem.kernel_vram->mapping = NULL;
drivers/gpu/drm/xe/xe_vram.c
231
if (vram->mapping)
drivers/gpu/drm/xe/xe_vram.c
244
vram->mapping = lmem_bar->mapping + offset;
drivers/gpu/drm/xe/xe_vram.c
58
lmem_bar->mapping = devm_ioremap_wc(&pdev->dev, lmem_bar->io_start, lmem_bar->io_size);
drivers/gpu/drm/xe/xe_vram_types.h
61
void __iomem *mapping;
drivers/gpu/host1x/bus.c
894
struct host1x_bo_mapping *mapping;
drivers/gpu/host1x/bus.c
899
list_for_each_entry(mapping, &cache->mappings, entry) {
drivers/gpu/host1x/bus.c
900
if (mapping->bo == bo && mapping->direction == dir) {
drivers/gpu/host1x/bus.c
901
kref_get(&mapping->ref);
drivers/gpu/host1x/bus.c
907
mapping = bo->ops->pin(dev, bo, dir);
drivers/gpu/host1x/bus.c
908
if (IS_ERR(mapping))
drivers/gpu/host1x/bus.c
911
spin_lock(&mapping->bo->lock);
drivers/gpu/host1x/bus.c
912
list_add_tail(&mapping->list, &bo->mappings);
drivers/gpu/host1x/bus.c
913
spin_unlock(&mapping->bo->lock);
drivers/gpu/host1x/bus.c
916
INIT_LIST_HEAD(&mapping->entry);
drivers/gpu/host1x/bus.c
917
mapping->cache = cache;
drivers/gpu/host1x/bus.c
919
list_add_tail(&mapping->entry, &cache->mappings);
drivers/gpu/host1x/bus.c
922
kref_get(&mapping->ref);
drivers/gpu/host1x/bus.c
929
return mapping;
drivers/gpu/host1x/bus.c
935
struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);
drivers/gpu/host1x/bus.c
941
if (mapping->cache)
drivers/gpu/host1x/bus.c
942
list_del(&mapping->entry);
drivers/gpu/host1x/bus.c
944
spin_lock(&mapping->bo->lock);
drivers/gpu/host1x/bus.c
945
list_del(&mapping->list);
drivers/gpu/host1x/bus.c
946
spin_unlock(&mapping->bo->lock);
drivers/gpu/host1x/bus.c
948
mapping->bo->ops->unpin(mapping);
drivers/gpu/host1x/bus.c
951
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
drivers/gpu/host1x/bus.c
953
struct host1x_bo_cache *cache = mapping->cache;
drivers/gpu/host1x/bus.c
958
kref_put(&mapping->ref, __host1x_bo_unpin);
drivers/gpu/host1x/dev.c
383
if (host->dev->archdata.mapping) {
drivers/gpu/host1x/dev.c
384
struct dma_iommu_mapping *mapping =
drivers/gpu/host1x/dev.c
387
arm_iommu_release_mapping(mapping);
drivers/hwmon/lm93.c
1679
int mapping;
drivers/hwmon/lm93.c
1682
mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03;
drivers/hwmon/lm93.c
1685
if (mapping && ((data->sfc2 >> nr) & 0x01))
drivers/hwmon/lm93.c
1686
rc = mapping;
drivers/hwmon/lm93.c
1872
int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
drivers/hwmon/lm93.c
1876
mapping = (mapping >> pwm) & 0x55;
drivers/hwmon/lm93.c
1877
mask = mapping & 0x01;
drivers/hwmon/lm93.c
1878
mask |= (mapping & 0x04) >> 1;
drivers/hwmon/lm93.c
1879
mask |= (mapping & 0x10) >> 2;
drivers/hwmon/lm93.c
1880
mask |= (mapping & 0x40) >> 3;
drivers/infiniband/core/mad.c
2311
recv->header.mapping,
drivers/infiniband/core/mad.c
2965
mad_priv->header.mapping = sg_list.addr;
drivers/infiniband/core/mad.c
2993
mad_priv->header.mapping,
drivers/infiniband/core/mad.c
3026
recv->header.mapping,
drivers/infiniband/core/mad_priv.h
75
u64 mapping;
drivers/infiniband/hw/cxgb4/cq.c
104
dma_unmap_addr_set(cq, mapping, cq->dma_addr);
drivers/infiniband/hw/cxgb4/cq.c
175
dma_unmap_addr(cq, mapping));
drivers/infiniband/hw/cxgb4/cq.c
66
dma_unmap_addr(cq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
103
dma_unmap_addr(sq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
136
dma_unmap_addr_set(sq, mapping, sq->dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
164
dma_unmap_addr(&wq->rq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
2526
dma_unmap_addr(wq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
2571
dma_unmap_addr_set(wq, mapping, wq->dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
258
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
2650
dma_unmap_addr(wq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
274
dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
392
dma_unmap_addr(&wq->rq, mapping));
drivers/infiniband/hw/cxgb4/t4.h
335
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/infiniband/hw/cxgb4/t4.h
364
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/infiniband/hw/cxgb4/t4.h
401
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/infiniband/hw/cxgb4/t4.h
679
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/infiniband/hw/hfi1/user_sdma.c
249
static u8 mapping[256];
drivers/infiniband/hw/hfi1/user_sdma.c
255
memset(mapping, 0xFF, 256);
drivers/infiniband/hw/hfi1/user_sdma.c
260
if (mapping[hash] == 0xFF) {
drivers/infiniband/hw/hfi1/user_sdma.c
261
mapping[hash] = next;
drivers/infiniband/hw/hfi1/user_sdma.c
265
return mapping[hash];
drivers/infiniband/hw/mthca/mthca_allocator.c
209
dma_unmap_addr_set(&buf->direct, mapping, t);
drivers/infiniband/hw/mthca/mthca_allocator.c
248
dma_unmap_addr_set(&buf->page_list[i], mapping, t);
drivers/infiniband/hw/mthca/mthca_allocator.c
286
dma_unmap_addr(&buf->direct, mapping));
drivers/infiniband/hw/mthca/mthca_allocator.c
292
mapping));
drivers/infiniband/hw/mthca/mthca_eq.c
505
dma_unmap_addr_set(&eq->page_list[i], mapping, t);
drivers/infiniband/hw/mthca/mthca_eq.c
575
mapping));
drivers/infiniband/hw/mthca/mthca_eq.c
621
dma_unmap_addr(&eq->page_list[i], mapping));
drivers/infiniband/hw/mthca/mthca_memfree.c
631
MTHCA_ICM_PAGE_SIZE, &page->mapping,
drivers/infiniband/hw/mthca/mthca_memfree.c
638
ret = mthca_MAP_ICM_page(dev, page->mapping,
drivers/infiniband/hw/mthca/mthca_memfree.c
642
page->db_rec, page->mapping);
drivers/infiniband/hw/mthca/mthca_memfree.c
689
page->db_rec, page->mapping);
drivers/infiniband/hw/mthca/mthca_memfree.c
757
dev->db_tab->page[i].mapping);
drivers/infiniband/hw/mthca/mthca_memfree.h
141
dma_addr_t mapping;
drivers/infiniband/hw/mthca/mthca_provider.h
49
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/infiniband/ulp/ipoib/ipoib.h
186
u64 mapping[IPOIB_UD_RX_SG];
drivers/infiniband/ulp/ipoib/ipoib.h
191
u64 mapping[MAX_SKB_FRAGS + 1];
drivers/infiniband/ulp/ipoib/ipoib.h
261
u64 mapping[IPOIB_CM_RX_SG];
drivers/infiniband/ulp/ipoib/ipoib.h
546
u64 *mapping = tx_req->mapping;
drivers/infiniband/ulp/ipoib/ipoib.h
549
priv->tx_sge[0].addr = mapping[0];
drivers/infiniband/ulp/ipoib/ipoib.h
556
priv->tx_sge[i + off].addr = mapping[i + off];
drivers/infiniband/ulp/ipoib/ipoib_cm.c
102
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
drivers/infiniband/ulp/ipoib/ipoib_cm.c
108
priv->cm.srq_ring[id].mapping);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
127
sge[i].addr = rx->rx_ring[id].mapping[i];
drivers/infiniband/ulp/ipoib/ipoib_cm.c
133
rx->rx_ring[id].mapping);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
144
u64 mapping[IPOIB_CM_RX_SG],
drivers/infiniband/ulp/ipoib/ipoib_cm.c
161
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
1623
priv->cm.srq_ring[i].mapping,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
163
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
drivers/infiniband/ulp/ipoib/ipoib_cm.c
175
mapping[i + 1] = ib_dma_map_page(priv->ca, page,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
177
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
drivers/infiniband/ulp/ipoib/ipoib_cm.c
186
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
189
ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
204
rx_ring[i].mapping);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
385
rx->rx_ring[i].mapping,
drivers/infiniband/ulp/ipoib/ipoib_cm.c
568
u64 mapping[IPOIB_CM_RX_SG];
drivers/infiniband/ulp/ipoib/ipoib_cm.c
632
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
drivers/infiniband/ulp/ipoib/ipoib_cm.c
635
ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
drivers/infiniband/ulp/ipoib/ipoib_cm.c
648
mapping, GFP_ATOMIC);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
659
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
660
memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
drivers/infiniband/ulp/ipoib/ipoib_cm.c
84
u64 mapping[IPOIB_CM_RX_SG])
drivers/infiniband/ulp/ipoib/ipoib_cm.c
88
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_cm.c
91
ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
108
priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
drivers/infiniband/ulp/ipoib/ipoib_ib.c
109
priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
drivers/infiniband/ulp/ipoib/ipoib_ib.c
115
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
128
u64 *mapping;
drivers/infiniband/ulp/ipoib/ipoib_ib.c
142
mapping = priv->rx_ring[id].mapping;
drivers/infiniband/ulp/ipoib/ipoib_ib.c
143
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
drivers/infiniband/ulp/ipoib/ipoib_ib.c
145
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
drivers/infiniband/ulp/ipoib/ipoib_ib.c
179
u64 mapping[IPOIB_UD_RX_SG];
drivers/infiniband/ulp/ipoib/ipoib_ib.c
199
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
205
memcpy(mapping, priv->rx_ring[wr_id].mapping,
drivers/infiniband/ulp/ipoib/ipoib_ib.c
206
IPOIB_UD_RX_SG * sizeof(*mapping));
drivers/infiniband/ulp/ipoib/ipoib_ib.c
220
ipoib_ud_dma_unmap_rx(priv, mapping);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
279
u64 *mapping = tx_req->mapping;
drivers/infiniband/ulp/ipoib/ipoib_ib.c
284
mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
drivers/infiniband/ulp/ipoib/ipoib_ib.c
286
if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
drivers/infiniband/ulp/ipoib/ipoib_ib.c
295
mapping[i + off] = ib_dma_map_page(ca,
drivers/infiniband/ulp/ipoib/ipoib_ib.c
300
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
drivers/infiniband/ulp/ipoib/ipoib_ib.c
309
ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
313
ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
322
u64 *mapping = tx_req->mapping;
drivers/infiniband/ulp/ipoib/ipoib_ib.c
327
ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
drivers/infiniband/ulp/ipoib/ipoib_ib.c
336
ib_dma_unmap_page(priv->ca, mapping[i + off],
drivers/infiniband/ulp/ipoib/ipoib_ib.c
852
priv->rx_ring[i].mapping);
drivers/infiniband/ulp/ipoib/ipoib_ib.c
95
u64 mapping[IPOIB_UD_RX_SG])
drivers/infiniband/ulp/ipoib/ipoib_ib.c
97
ib_dma_unmap_single(priv->ca, mapping[0],
drivers/input/joystick/xpad.c
1112
if (xpad->mapping & MAP_SHARE_BUTTON) {
drivers/input/joystick/xpad.c
1113
if (xpad->mapping & MAP_SHARE_OFFSET)
drivers/input/joystick/xpad.c
1126
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
drivers/input/joystick/xpad.c
1147
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
drivers/input/joystick/xpad.c
1162
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
drivers/input/joystick/xpad.c
1175
if (xpad->mapping & MAP_PROFILE_BUTTON)
drivers/input/joystick/xpad.c
1180
if (xpad->mapping & MAP_PADDLES) {
drivers/input/joystick/xpad.c
130
u8 mapping;
drivers/input/joystick/xpad.c
1986
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
drivers/input/joystick/xpad.c
2001
if (xpad->mapping & MAP_SHARE_BUTTON)
drivers/input/joystick/xpad.c
2008
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
drivers/input/joystick/xpad.c
2015
if (xpad->mapping & MAP_PADDLES) {
drivers/input/joystick/xpad.c
2026
if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) ||
drivers/input/joystick/xpad.c
2032
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
drivers/input/joystick/xpad.c
2042
if (xpad->mapping & MAP_PROFILE_BUTTON)
drivers/input/joystick/xpad.c
2107
xpad->mapping = xpad_device[i].mapping;
drivers/input/joystick/xpad.c
2129
xpad->mapping |= MAP_DPAD_TO_BUTTONS;
drivers/input/joystick/xpad.c
2131
xpad->mapping |= MAP_TRIGGERS_TO_BUTTONS;
drivers/input/joystick/xpad.c
2133
xpad->mapping |= MAP_STICKS_TO_NULL;
drivers/input/joystick/xpad.c
799
int mapping; /* map d-pad to buttons or to axes */
drivers/input/joystick/xpad.c
829
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
drivers/input/joystick/xpad.c
844
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
drivers/input/joystick/xpad.c
853
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
drivers/input/joystick/xpad.c
904
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
drivers/input/joystick/xpad.c
918
if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) ||
drivers/input/joystick/xpad.c
943
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
drivers/input/joystick/xpad.c
958
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
drivers/iommu/iommu-pages.c
16
IOPTDESC_MATCH(mapping, __page_mapping);
drivers/iommu/ipmmu-vmsa.c
1114
arm_iommu_release_mapping(mmu->mapping);
drivers/iommu/ipmmu-vmsa.c
67
struct dma_iommu_mapping *mapping;
drivers/iommu/ipmmu-vmsa.c
807
if (!mmu->mapping) {
drivers/iommu/ipmmu-vmsa.c
808
struct dma_iommu_mapping *mapping;
drivers/iommu/ipmmu-vmsa.c
810
mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G);
drivers/iommu/ipmmu-vmsa.c
811
if (IS_ERR(mapping)) {
drivers/iommu/ipmmu-vmsa.c
813
ret = PTR_ERR(mapping);
drivers/iommu/ipmmu-vmsa.c
817
mmu->mapping = mapping;
drivers/iommu/ipmmu-vmsa.c
821
ret = arm_iommu_attach_device(dev, mmu->mapping);
drivers/iommu/ipmmu-vmsa.c
830
if (mmu->mapping)
drivers/iommu/ipmmu-vmsa.c
831
arm_iommu_release_mapping(mmu->mapping);
drivers/iommu/ipmmu-vmsa.c
873
arm_iommu_release_mapping(mmu->mapping);
drivers/iommu/mtk_iommu_v1.c
115
struct dma_iommu_mapping *mapping;
drivers/iommu/mtk_iommu_v1.c
316
mtk_mapping = data->mapping;
drivers/iommu/mtk_iommu_v1.c
450
mtk_mapping = data->mapping;
drivers/iommu/mtk_iommu_v1.c
457
data->mapping = mtk_mapping;
drivers/iommu/mtk_iommu_v1.c
522
err = arm_iommu_attach_device(dev, data->mapping);
drivers/iommu/virtio-iommu.c
334
struct viommu_mapping *mapping;
drivers/iommu/virtio-iommu.c
336
mapping = kzalloc_obj(*mapping, GFP_ATOMIC);
drivers/iommu/virtio-iommu.c
337
if (!mapping)
drivers/iommu/virtio-iommu.c
340
mapping->paddr = paddr;
drivers/iommu/virtio-iommu.c
341
mapping->iova.start = iova;
drivers/iommu/virtio-iommu.c
342
mapping->iova.last = end;
drivers/iommu/virtio-iommu.c
343
mapping->flags = flags;
drivers/iommu/virtio-iommu.c
346
interval_tree_insert(&mapping->iova, &vdomain->mappings);
drivers/iommu/virtio-iommu.c
366
struct viommu_mapping *mapping = NULL;
drivers/iommu/virtio-iommu.c
373
mapping = container_of(node, struct viommu_mapping, iova);
drivers/iommu/virtio-iommu.c
377
if (mapping->iova.start < iova)
drivers/iommu/virtio-iommu.c
384
unmapped += mapping->iova.last - mapping->iova.start + 1;
drivers/iommu/virtio-iommu.c
387
kfree(mapping);
drivers/iommu/virtio-iommu.c
454
struct viommu_mapping *mapping;
drivers/iommu/virtio-iommu.c
461
mapping = container_of(node, struct viommu_mapping, iova);
drivers/iommu/virtio-iommu.c
465
.virt_start = cpu_to_le64(mapping->iova.start),
drivers/iommu/virtio-iommu.c
466
.virt_end = cpu_to_le64(mapping->iova.last),
drivers/iommu/virtio-iommu.c
467
.phys_start = cpu_to_le64(mapping->paddr),
drivers/iommu/virtio-iommu.c
468
.flags = cpu_to_le32(mapping->flags),
drivers/iommu/virtio-iommu.c
920
struct viommu_mapping *mapping;
drivers/iommu/virtio-iommu.c
927
mapping = container_of(node, struct viommu_mapping, iova);
drivers/iommu/virtio-iommu.c
928
paddr = mapping->paddr + (iova - mapping->iova.start);
drivers/md/dm-cache-metadata.c
1324
__le64 mapping;
drivers/md/dm-cache-metadata.c
1335
memcpy(&mapping, mapping_value_le, sizeof(mapping));
drivers/md/dm-cache-metadata.c
1336
unpack_value(mapping, &oblock, &flags);
drivers/md/dm-cache-metadata.c
1366
__le64 mapping;
drivers/md/dm-cache-metadata.c
1377
memcpy(&mapping, mapping_value_le, sizeof(mapping));
drivers/md/dm-cache-metadata.c
1378
unpack_value(mapping, &oblock, &flags);
drivers/md/dm-pcache/cache_dev.c
103
cache_dev->mapping = vaddr;
drivers/md/dm-pcache/cache_dev.c
112
cache_dev->mapping = vaddr;
drivers/md/dm-pcache/cache_dev.c
16
vunmap(cache_dev->mapping);
drivers/md/dm-pcache/cache_dev.h
27
#define CACHE_DEV_SB(cache_dev) ((struct pcache_sb *)(cache_dev->mapping + PCACHE_SB_OFF))
drivers/md/dm-pcache/cache_dev.h
28
#define CACHE_DEV_CACHE_INFO(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_INFO_OFF)
drivers/md/dm-pcache/cache_dev.h
29
#define CACHE_DEV_CACHE_CTRL(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_CTRL_OFF)
drivers/md/dm-pcache/cache_dev.h
30
#define CACHE_DEV_SEGMENTS(cache_dev) ((void *)cache_dev->mapping + PCACHE_SEGMENTS_OFF)
drivers/md/dm-pcache/cache_dev.h
53
void *mapping;
drivers/md/dm-vdo/block-map.c
1778
const struct data_location *mapping,
drivers/md/dm-vdo/block-map.c
1781
if (!vdo_is_valid_location(mapping) ||
drivers/md/dm-vdo/block-map.c
1782
vdo_is_state_compressed(mapping->state) ||
drivers/md/dm-vdo/block-map.c
1783
(vdo_is_mapped_location(mapping) && (mapping->pbn == VDO_ZERO_BLOCK)))
drivers/md/dm-vdo/block-map.c
1790
return !vdo_is_physical_data_block(vdo->depot, mapping->pbn);
drivers/md/dm-vdo/block-map.c
1802
struct data_location mapping =
drivers/md/dm-vdo/block-map.c
1805
if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
drivers/md/dm-vdo/block-map.c
1808
(unsigned long long) mapping.pbn, mapping.state,
drivers/md/dm-vdo/block-map.c
1815
if (!vdo_is_mapped_location(&mapping)) {
drivers/md/dm-vdo/block-map.c
1822
lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
drivers/md/dm-vdo/block-map.c
2242
struct data_location mapping;
drivers/md/dm-vdo/block-map.c
2282
mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]);
drivers/md/dm-vdo/block-map.c
2283
if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
drivers/md/dm-vdo/block-map.c
2286
(unsigned long long) mapping.pbn, mapping.state,
drivers/md/dm-vdo/block-map.c
2293
if (!vdo_is_mapped_location(&mapping)) {
drivers/md/dm-vdo/block-map.c
2299
lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
drivers/md/dm-vdo/block-map.c
2317
struct data_location mapping;
drivers/md/dm-vdo/block-map.c
2331
mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
drivers/md/dm-vdo/block-map.c
2332
if (!vdo_is_valid_location(&mapping) || vdo_is_state_compressed(mapping.state))
drivers/md/dm-vdo/block-map.c
2334
return mapping.pbn;
drivers/md/dm-vdo/encodings.h
220
struct data_location mapping;
drivers/md/dm-vdo/encodings.h
257
struct block_map_entry mapping;
drivers/md/dm-vdo/encodings.h
888
.mapping = vdo_pack_block_map_entry(entry->mapping.pbn,
drivers/md/dm-vdo/encodings.h
889
entry->mapping.state),
drivers/md/dm-vdo/encodings.h
914
.mapping = vdo_unpack_block_map_entry(&entry->mapping),
drivers/md/dm-vdo/recovery-journal.c
1350
.mapping = {
drivers/md/dm-vdo/repair.c
1302
entry->mapping = vdo_unpack_block_map_entry(&packed_entry->block_map_entry);
drivers/md/dm-vdo/repair.c
1341
.block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
drivers/md/dm-vdo/repair.c
1342
entry.mapping.state),
drivers/md/dm-vdo/repair.c
1485
.block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
drivers/md/dm-vdo/repair.c
1486
entry.mapping.state),
drivers/md/dm-vdo/repair.c
1542
if (vdo_is_mapped_location(&entry.mapping))
drivers/md/dm-vdo/repair.c
405
struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
drivers/md/dm-vdo/repair.c
407
if (vdo_is_mapped_location(&mapping))
drivers/md/dm-vdo/repair.c
425
struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
drivers/md/dm-vdo/repair.c
427
if (!vdo_is_valid_location(&mapping)) {
drivers/md/dm-vdo/repair.c
433
if (!vdo_is_mapped_location(&mapping))
drivers/md/dm-vdo/repair.c
437
if (mapping.pbn == VDO_ZERO_BLOCK)
drivers/md/dm-vdo/repair.c
440
if (!vdo_is_physical_data_block(depot, mapping.pbn)) {
drivers/md/dm-vdo/repair.c
449
result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,
drivers/md/dm-vdo/repair.c
457
slot, (unsigned long long) mapping.pbn);
drivers/md/dm-vdo/repair.c
756
!vdo_is_valid_location(&entry->mapping) ||
drivers/md/dm-vdo/repair.c
758
!vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
drivers/md/dm-vdo/repair.c
766
(unsigned long long) entry->mapping.pbn);
drivers/md/dm-vdo/repair.c
770
(vdo_is_state_compressed(entry->mapping.state) ||
drivers/md/dm-vdo/repair.c
771
(entry->mapping.pbn == VDO_ZERO_BLOCK) ||
drivers/md/dm-vdo/repair.c
780
(unsigned long long) entry->mapping.pbn);
drivers/md/dm-vdo/repair.c
822
pbn = entry.mapping.pbn;
drivers/media/pci/cx18/cx18-ioctl.c
718
static const int mapping[8] = {
drivers/media/pci/cx18/cx18-ioctl.c
744
e_idx->flags = mapping[le32_to_cpu(e_buf->flags) & 0x7];
drivers/media/pci/ivtv/ivtv-fileops.c
177
const int mapping[8] = { -1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P, -1,
drivers/media/pci/ivtv/ivtv-fileops.c
188
e->flags = mapping[read_enc(addr + 12) & 7];
drivers/media/platform/nvidia/tegra-vde/iommu.c
74
if (dev->archdata.mapping) {
drivers/media/platform/nvidia/tegra-vde/iommu.c
75
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
drivers/media/platform/nvidia/tegra-vde/iommu.c
78
arm_iommu_release_mapping(mapping);
drivers/media/platform/ti/omap3isp/isp.c
1916
arm_iommu_release_mapping(isp->mapping);
drivers/media/platform/ti/omap3isp/isp.c
1917
isp->mapping = NULL;
drivers/media/platform/ti/omap3isp/isp.c
1924
struct dma_iommu_mapping *mapping;
drivers/media/platform/ti/omap3isp/isp.c
1928
mapping = to_dma_iommu_mapping(isp->dev);
drivers/media/platform/ti/omap3isp/isp.c
1929
if (mapping) {
drivers/media/platform/ti/omap3isp/isp.c
1931
arm_iommu_release_mapping(mapping);
drivers/media/platform/ti/omap3isp/isp.c
1938
mapping = arm_iommu_create_mapping(isp->dev, SZ_1G, SZ_2G);
drivers/media/platform/ti/omap3isp/isp.c
1939
if (IS_ERR(mapping)) {
drivers/media/platform/ti/omap3isp/isp.c
1941
return PTR_ERR(mapping);
drivers/media/platform/ti/omap3isp/isp.c
1944
isp->mapping = mapping;
drivers/media/platform/ti/omap3isp/isp.c
1947
ret = arm_iommu_attach_device(isp->dev, mapping);
drivers/media/platform/ti/omap3isp/isp.c
1956
arm_iommu_release_mapping(isp->mapping);
drivers/media/platform/ti/omap3isp/isp.c
1957
isp->mapping = NULL;
drivers/media/platform/ti/omap3isp/isp.h
188
struct dma_iommu_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
1033
static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val)
drivers/media/usb/uvc/uvc_ctrl.c
1037
for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
drivers/media/usb/uvc/uvc_ctrl.c
1040
if (!test_bit(i, &mapping->menu_mask))
drivers/media/usb/uvc/uvc_ctrl.c
1043
menu_value = uvc_mapping_get_menu_value(mapping, i);
drivers/media/usb/uvc/uvc_ctrl.c
1058
static int uvc_get_le_value(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1062
int offset = mapping->offset;
drivers/media/usb/uvc/uvc_ctrl.c
1063
int bits = mapping->size;
drivers/media/usb/uvc/uvc_ctrl.c
1089
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
drivers/media/usb/uvc/uvc_ctrl.c
1090
value |= -(value & (1 << (mapping->size - 1)));
drivers/media/usb/uvc/uvc_ctrl.c
1093
if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
drivers/media/usb/uvc/uvc_ctrl.c
1101
*out = uvc_menu_to_v4l2_menu(mapping, value);
drivers/media/usb/uvc/uvc_ctrl.c
1113
static int uvc_set_le_value(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1117
int offset = mapping->offset;
drivers/media/usb/uvc/uvc_ctrl.c
1118
int bits = mapping->size;
drivers/media/usb/uvc/uvc_ctrl.c
1128
switch (mapping->v4l2_type) {
drivers/media/usb/uvc/uvc_ctrl.c
1130
value = uvc_mapping_get_menu_value(mapping, value);
drivers/media/usb/uvc/uvc_ctrl.c
1174
struct uvc_control_mapping **mapping, struct uvc_control **control,
drivers/media/usb/uvc/uvc_ctrl.c
1192
*mapping = map;
drivers/media/usb/uvc/uvc_ctrl.c
1196
if ((*mapping == NULL || (*mapping)->id > map->id) &&
drivers/media/usb/uvc/uvc_ctrl.c
1201
*mapping = map;
drivers/media/usb/uvc/uvc_ctrl.c
1208
u32 v4l2_id, struct uvc_control_mapping **mapping)
drivers/media/usb/uvc/uvc_ctrl.c
1215
*mapping = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
1222
__uvc_find_control(entity, v4l2_id, mapping, &ctrl, next,
drivers/media/usb/uvc/uvc_ctrl.c
1330
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1342
*value = uvc_mapping_get_s32(mapping, UVC_GET_CUR,
drivers/media/usb/uvc/uvc_ctrl.c
1394
struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
1403
switch (mapping->v4l2_type) {
drivers/media/usb/uvc/uvc_ctrl.c
1440
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
1455
ctrl = uvc_find_control(chain, v4l2_id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1464
return uvc_ctrl_is_readable(ctrls->which, ctrl, mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1469
if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
drivers/media/usb/uvc/uvc_ctrl.c
1477
if (ctrls->controls[i].id == mapping->master_id)
drivers/media/usb/uvc/uvc_ctrl.c
1479
mapping->master_manual ? 0 : -EACCES;
drivers/media/usb/uvc/uvc_ctrl.c
1482
__uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
drivers/media/usb/uvc/uvc_ctrl.c
1491
if (ret >= 0 && val != mapping->master_manual)
drivers/media/usb/uvc/uvc_ctrl.c
1512
struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
1520
return uvc_mapping_get_s32(mapping, UVC_GET_RES,
drivers/media/usb/uvc/uvc_ctrl.c
1524
return uvc_mapping_get_s32(mapping, UVC_GET_MAX,
drivers/media/usb/uvc/uvc_ctrl.c
1549
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1567
v4l2_ctrl->default_value = uvc_mapping_get_s32(mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1571
switch (mapping->v4l2_type) {
drivers/media/usb/uvc/uvc_ctrl.c
1573
v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
drivers/media/usb/uvc/uvc_ctrl.c
1574
v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
drivers/media/usb/uvc/uvc_ctrl.c
1592
v4l2_ctrl->maximum = uvc_get_ctrl_bitmap(ctrl, mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1601
v4l2_ctrl->maximum = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
drivers/media/usb/uvc/uvc_ctrl.c
1620
v4l2_ctrl->minimum = uvc_mapping_get_s32(mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1627
v4l2_ctrl->step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
drivers/media/usb/uvc/uvc_ctrl.c
1635
static size_t uvc_mapping_v4l2_size(struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
1637
if (mapping->v4l2_type == V4L2_CTRL_TYPE_RECT)
drivers/media/usb/uvc/uvc_ctrl.c
1640
if (uvc_ctrl_mapping_is_compound(mapping))
drivers/media/usb/uvc/uvc_ctrl.c
1641
return DIV_ROUND_UP(mapping->size, 8);
drivers/media/usb/uvc/uvc_ctrl.c
1648
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1656
v4l2_ctrl->id = mapping->id;
drivers/media/usb/uvc/uvc_ctrl.c
1657
v4l2_ctrl->type = mapping->v4l2_type;
drivers/media/usb/uvc/uvc_ctrl.c
1658
strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
drivers/media/usb/uvc/uvc_ctrl.c
1670
if (mapping->master_id)
drivers/media/usb/uvc/uvc_ctrl.c
1671
__uvc_find_control(ctrl->entity, mapping->master_id,
drivers/media/usb/uvc/uvc_ctrl.c
1696
if (val != mapping->master_manual)
drivers/media/usb/uvc/uvc_ctrl.c
1701
v4l2_ctrl->elem_size = uvc_mapping_v4l2_size(mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1713
ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
drivers/media/usb/uvc/uvc_ctrl.c
1714
if (ret && !mapping->disabled) {
drivers/media/usb/uvc/uvc_ctrl.c
1717
mapping->id, uvc_map_get_name(mapping), ret);
drivers/media/usb/uvc/uvc_ctrl.c
1718
mapping->disabled = true;
drivers/media/usb/uvc/uvc_ctrl.c
1721
if (mapping->disabled)
drivers/media/usb/uvc/uvc_ctrl.c
1731
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
1745
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1757
ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, mapping->id,
drivers/media/usb/uvc/uvc_ctrl.c
1763
ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
drivers/media/usb/uvc/uvc_ctrl.c
1781
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
1792
if (index >= BITS_PER_TYPE(mapping->menu_mask))
drivers/media/usb/uvc/uvc_ctrl.c
1799
ctrl = uvc_find_control(chain, query_menu->id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
1800
if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
drivers/media/usb/uvc/uvc_ctrl.c
1805
if (!test_bit(query_menu->index, &mapping->menu_mask)) {
drivers/media/usb/uvc/uvc_ctrl.c
1810
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
drivers/media/usb/uvc/uvc_ctrl.c
1819
mask = uvc_mapping_get_menu_value(mapping, query_menu->index);
drivers/media/usb/uvc/uvc_ctrl.c
1825
if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & mask)) {
drivers/media/usb/uvc/uvc_ctrl.c
1831
name = uvc_mapping_get_menu_name(mapping, query_menu->index);
drivers/media/usb/uvc/uvc_ctrl.c
1851
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
1856
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
drivers/media/usb/uvc/uvc_ctrl.c
1880
struct uvc_control_mapping *mapping, s32 value, u32 changes)
drivers/media/usb/uvc/uvc_ctrl.c
1886
if (list_empty(&mapping->ev_subs))
drivers/media/usb/uvc/uvc_ctrl.c
1889
uvc_ctrl_fill_event(chain, &ev, ctrl, mapping, value, changes);
drivers/media/usb/uvc/uvc_ctrl.c
1891
list_for_each_entry(sev, &mapping->ev_subs, node) {
drivers/media/usb/uvc/uvc_ctrl.c
1907
struct uvc_control_mapping *mapping = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
1912
__uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0, 0);
drivers/media/usb/uvc/uvc_ctrl.c
1916
if (uvc_ctrl_mapping_is_compound(mapping) ||
drivers/media/usb/uvc/uvc_ctrl.c
1917
__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
drivers/media/usb/uvc/uvc_ctrl.c
1920
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
drivers/media/usb/uvc/uvc_ctrl.c
1971
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
1984
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
drivers/media/usb/uvc/uvc_ctrl.c
1987
if (uvc_ctrl_mapping_is_compound(mapping))
drivers/media/usb/uvc/uvc_ctrl.c
1990
value = uvc_mapping_get_s32(mapping, UVC_GET_CUR, data);
drivers/media/usb/uvc/uvc_ctrl.c
1996
for (i = 0; i < ARRAY_SIZE(mapping->slave_ids); ++i) {
drivers/media/usb/uvc/uvc_ctrl.c
1997
if (!mapping->slave_ids[i])
drivers/media/usb/uvc/uvc_ctrl.c
2001
mapping->slave_ids[i]);
drivers/media/usb/uvc/uvc_ctrl.c
2004
uvc_ctrl_send_event(chain, handle, ctrl, mapping, value,
drivers/media/usb/uvc/uvc_ctrl.c
2069
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
2078
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2086
for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
drivers/media/usb/uvc/uvc_ctrl.c
2087
u32 slave_id = mapping->slave_ids[j];
drivers/media/usb/uvc/uvc_ctrl.c
2104
if (uvc_ctrl_mapping_is_compound(mapping))
drivers/media/usb/uvc/uvc_ctrl.c
2112
if (mapping->master_id &&
drivers/media/usb/uvc/uvc_ctrl.c
2114
mapping->master_id))
drivers/media/usb/uvc/uvc_ctrl.c
2117
uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2125
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
2138
ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2153
if (uvc_ctrl_mapping_is_compound(mapping) ||
drivers/media/usb/uvc/uvc_ctrl.c
2154
__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
drivers/media/usb/uvc/uvc_ctrl.c
2157
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
drivers/media/usb/uvc/uvc_ctrl.c
2170
list_add_tail(&sev->node, &mapping->ev_subs);
drivers/media/usb/uvc/uvc_ctrl.c
2303
struct uvc_control_mapping *mapping = NULL;
drivers/media/usb/uvc/uvc_ctrl.c
2311
__uvc_find_control(entity, ctrls->controls[i].id, &mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2359
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2390
size = uvc_mapping_v4l2_size(mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2408
ret = mapping->get(mapping, query, uvc_ctrl_data(ctrl, id), size, data);
drivers/media/usb/uvc/uvc_ctrl.c
2422
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2430
return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
drivers/media/usb/uvc/uvc_ctrl.c
2439
ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, &qec);
drivers/media/usb/uvc/uvc_ctrl.c
2460
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2463
if (uvc_ctrl_mapping_is_compound(mapping))
drivers/media/usb/uvc/uvc_ctrl.c
2464
return uvc_mapping_get_xctrl_compound(chain, ctrl, mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2466
return uvc_mapping_get_xctrl_std(chain, ctrl, mapping, which, xctrl);
drivers/media/usb/uvc/uvc_ctrl.c
2473
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
2478
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2482
return uvc_mapping_get_xctrl(chain, ctrl, mapping, which, xctrl);
drivers/media/usb/uvc/uvc_ctrl.c
2488
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2497
switch (mapping->v4l2_type) {
drivers/media/usb/uvc/uvc_ctrl.c
2505
max = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
drivers/media/usb/uvc/uvc_ctrl.c
2520
min = uvc_mapping_get_s32(mapping, UVC_GET_MIN,
drivers/media/usb/uvc/uvc_ctrl.c
2523
step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
drivers/media/usb/uvc/uvc_ctrl.c
2529
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
drivers/media/usb/uvc/uvc_ctrl.c
2543
value &= uvc_get_ctrl_bitmap(ctrl, mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2552
if (value < (ffs(mapping->menu_mask) - 1) ||
drivers/media/usb/uvc/uvc_ctrl.c
2553
value > (fls(mapping->menu_mask) - 1))
drivers/media/usb/uvc/uvc_ctrl.c
2556
if (!test_bit(value, &mapping->menu_mask))
drivers/media/usb/uvc/uvc_ctrl.c
2563
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
drivers/media/usb/uvc/uvc_ctrl.c
2564
int val = uvc_mapping_get_menu_value(mapping, value);
drivers/media/usb/uvc/uvc_ctrl.c
2571
if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & val))
drivers/media/usb/uvc/uvc_ctrl.c
2584
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2588
size_t size = uvc_mapping_v4l2_size(mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2602
return mapping->set(mapping, size, data,
drivers/media/usb/uvc/uvc_ctrl.c
2607
struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
2610
if (uvc_ctrl_mapping_is_compound(mapping))
drivers/media/usb/uvc/uvc_ctrl.c
2611
return uvc_mapping_set_xctrl_compound(ctrl, mapping, xctrl);
drivers/media/usb/uvc/uvc_ctrl.c
2613
uvc_mapping_set_s32(mapping, xctrl->value,
drivers/media/usb/uvc/uvc_ctrl.c
2621
struct uvc_control_mapping *mapping;
drivers/media/usb/uvc/uvc_ctrl.c
2630
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
drivers/media/usb/uvc/uvc_ctrl.c
2636
ret = uvc_ctrl_clamp(chain, ctrl, xctrl->id, mapping, &xctrl->value);
drivers/media/usb/uvc/uvc_ctrl.c
2644
if ((ctrl->info.size * 8) != mapping->size) {
drivers/media/usb/uvc/uvc_ctrl.c
2657
ret = uvc_mapping_set_xctrl(ctrl, mapping, xctrl);
drivers/media/usb/uvc/uvc_ctrl.c
3028
struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
3040
map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
drivers/media/usb/uvc/uvc_ctrl.c
3049
if (mapping->name) {
drivers/media/usb/uvc/uvc_ctrl.c
3050
map->name = kstrdup(mapping->name, GFP_KERNEL);
drivers/media/usb/uvc/uvc_ctrl.c
3057
if (mapping->menu_mapping && mapping->menu_mask) {
drivers/media/usb/uvc/uvc_ctrl.c
3058
size = sizeof(mapping->menu_mapping[0])
drivers/media/usb/uvc/uvc_ctrl.c
3059
* fls(mapping->menu_mask);
drivers/media/usb/uvc/uvc_ctrl.c
3060
map->menu_mapping = kmemdup(mapping->menu_mapping, size,
drivers/media/usb/uvc/uvc_ctrl.c
3065
if (mapping->menu_names && mapping->menu_mask) {
drivers/media/usb/uvc/uvc_ctrl.c
3066
size = sizeof(mapping->menu_names[0])
drivers/media/usb/uvc/uvc_ctrl.c
3067
* fls(mapping->menu_mask);
drivers/media/usb/uvc/uvc_ctrl.c
3068
map->menu_names = kmemdup(mapping->menu_names, size,
drivers/media/usb/uvc/uvc_ctrl.c
3111
const struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
3120
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
drivers/media/usb/uvc/uvc_ctrl.c
3123
uvc_map_get_name(mapping), mapping->id);
drivers/media/usb/uvc/uvc_ctrl.c
3132
!uvc_entity_match_guid(entity, mapping->entity))
drivers/media/usb/uvc/uvc_ctrl.c
3137
if (ctrl->index == mapping->selector - 1) {
drivers/media/usb/uvc/uvc_ctrl.c
3160
if (mapping->size > 32 ||
drivers/media/usb/uvc/uvc_ctrl.c
3161
mapping->offset + mapping->size > ctrl->info.size * 8) {
drivers/media/usb/uvc/uvc_ctrl.c
3167
if (mapping->id == map->id) {
drivers/media/usb/uvc/uvc_ctrl.c
3170
uvc_map_get_name(mapping), mapping->id);
drivers/media/usb/uvc/uvc_ctrl.c
3181
uvc_map_get_name(mapping), UVC_MAX_CONTROL_MAPPINGS);
drivers/media/usb/uvc/uvc_ctrl.c
3186
ret = __uvc_ctrl_add_mapping(chain, ctrl, mapping);
drivers/media/usb/uvc/uvc_ctrl.c
3289
const struct uvc_control_mapping *mapping = &uvc_ctrl_mappings[i];
drivers/media/usb/uvc/uvc_ctrl.c
3291
if (!uvc_entity_match_guid(ctrl->entity, mapping->entity) ||
drivers/media/usb/uvc/uvc_ctrl.c
3292
ctrl->info.selector != mapping->selector)
drivers/media/usb/uvc/uvc_ctrl.c
3296
if (mapping->filter_mapping) {
drivers/media/usb/uvc/uvc_ctrl.c
3297
mapping = mapping->filter_mapping(chain, ctrl);
drivers/media/usb/uvc/uvc_ctrl.c
3298
if (!mapping)
drivers/media/usb/uvc/uvc_ctrl.c
3302
__uvc_ctrl_add_mapping(chain, ctrl, mapping);
drivers/media/usb/uvc/uvc_ctrl.c
3410
struct uvc_control_mapping *mapping, *nm;
drivers/media/usb/uvc/uvc_ctrl.c
3412
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
drivers/media/usb/uvc/uvc_ctrl.c
3413
list_del(&mapping->list);
drivers/media/usb/uvc/uvc_ctrl.c
3414
kfree(mapping->menu_names);
drivers/media/usb/uvc/uvc_ctrl.c
3415
kfree(mapping->menu_mapping);
drivers/media/usb/uvc/uvc_ctrl.c
3416
kfree(mapping->name);
drivers/media/usb/uvc/uvc_ctrl.c
3417
kfree(mapping);
drivers/media/usb/uvc/uvc_ctrl.c
410
static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping)
drivers/media/usb/uvc/uvc_ctrl.c
412
return mapping->v4l2_type >= V4L2_CTRL_COMPOUND_TYPES;
drivers/media/usb/uvc/uvc_ctrl.c
415
static s32 uvc_mapping_get_s32(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
420
mapping->get(mapping, query, data_in, sizeof(data_out), &data_out);
drivers/media/usb/uvc/uvc_ctrl.c
425
static void uvc_mapping_set_s32(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
428
mapping->set(mapping, sizeof(data_in), &data_in, data_out);
drivers/media/usb/uvc/uvc_ctrl.c
445
static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
448
if (!test_bit(idx, &mapping->menu_mask))
drivers/media/usb/uvc/uvc_ctrl.c
451
if (mapping->menu_mapping)
drivers/media/usb/uvc/uvc_ctrl.c
452
return mapping->menu_mapping[idx];
drivers/media/usb/uvc/uvc_ctrl.c
458
uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)
drivers/media/usb/uvc/uvc_ctrl.c
460
if (!test_bit(idx, &mapping->menu_mask))
drivers/media/usb/uvc/uvc_ctrl.c
463
if (mapping->menu_names)
drivers/media/usb/uvc/uvc_ctrl.c
464
return mapping->menu_names[idx];
drivers/media/usb/uvc/uvc_ctrl.c
466
return v4l2_ctrl_get_menu(mapping->id)[idx];
drivers/media/usb/uvc/uvc_ctrl.c
469
static int uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, u8 query,
drivers/media/usb/uvc/uvc_ctrl.c
496
static int uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
513
static int uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
517
unsigned int first = mapping->offset / 8;
drivers/media/usb/uvc/uvc_ctrl.c
540
static int uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
drivers/media/usb/uvc/uvc_ctrl.c
544
unsigned int first = mapping->offset / 8;
drivers/media/usb/uvc/uvc_ctrl.c
646
static int uvc_get_rect(struct uvc_control_mapping *mapping, u8 query,
drivers/media/usb/uvc/uvc_ctrl.c
667
static int uvc_set_rect(struct uvc_control_mapping *mapping, size_t v4l2_size,
drivers/media/usb/uvc/uvcvideo.h
146
int (*get)(struct uvc_control_mapping *mapping, u8 query,
drivers/media/usb/uvc/uvcvideo.h
148
int (*set)(struct uvc_control_mapping *mapping, size_t v4l2_size,
drivers/media/usb/uvc/uvcvideo.h
764
const struct uvc_control_mapping *mapping);
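
The uvc_ctrl.c entries above funnel every conversion between raw UVC payloads and V4L2 control values through the mapping's get/set callbacks (uvcvideo.h:146/148), with uvc_mapping_get_s32()/uvc_mapping_set_s32() as thin s32 wrappers around them. Below is a minimal sketch of such a callback pair for a bitfield control; the parameter order mirrors the call sites at uvc_ctrl.c:420/428 above, and struct demo_mapping is a cut-down illustrative stand-in (only the offset/size fields that the boundary checks above rely on), not the driver's real struct uvc_control_mapping.

#include <linux/bitops.h>
#include <linux/types.h>

struct demo_mapping {
	u8 offset;	/* bit offset into the UVC control payload */
	u8 size;	/* field width in bits (<= 32, per the checks above) */
};

/* get: device payload -> V4L2 value, gathering bits LSB-first */
static int demo_get(struct demo_mapping *map, u8 query,
		    const u8 *uvc_in, size_t v4l2_size, void *v4l2_out)
{
	u32 value = 0;
	unsigned int i;

	for (i = 0; i < map->size; i++)
		if (uvc_in[(map->offset + i) / 8] & BIT((map->offset + i) % 8))
			value |= BIT(i);
	*(s32 *)v4l2_out = value;
	return 0;
}

/* set: V4L2 value -> device payload, the exact inverse */
static int demo_set(struct demo_mapping *map, size_t v4l2_size,
		    const void *v4l2_in, u8 *uvc_out)
{
	u32 value = *(const s32 *)v4l2_in;
	unsigned int i;

	for (i = 0; i < map->size; i++)
		if (value & BIT(i))
			uvc_out[(map->offset + i) / 8] |=
				BIT((map->offset + i) % 8);
	return 0;
}
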
drivers/misc/ocxl/afu_irq.c
157
if (ctx->mapping)
drivers/misc/ocxl/afu_irq.c
158
unmap_mapping_range(ctx->mapping,
drivers/misc/ocxl/context.c
276
if (ctx->mapping)
drivers/misc/ocxl/context.c
277
unmap_mapping_range(ctx->mapping, 0, 0, 1);
drivers/misc/ocxl/context.c
32
ctx->mapping = mapping;
drivers/misc/ocxl/context.c
8
struct address_space *mapping)
drivers/misc/ocxl/file.c
464
ctx->mapping = NULL;
drivers/misc/ocxl/ocxl_internal.h
75
struct address_space *mapping;
drivers/misc/uacce/uacce.c
187
q->mapping = filep->f_mapping;
drivers/misc/uacce/uacce.c
600
unmap_mapping_range(q->mapping, 0, 0, 1);
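
Both ocxl and uacce retire user mmaps the same way: stash the file's struct address_space at open time, then call unmap_mapping_range(mapping, 0, 0, 1) on teardown to zap every PTE mapping the device. A minimal sketch of that pattern; the demo_* names are illustrative.

#include <linux/fs.h>
#include <linux/mm.h>

/* At open/mmap time, remember where the user mappings live. */
static void demo_remember_mapping(struct file *filp,
				  struct address_space **saved)
{
	*saved = filp->f_mapping;	/* as uacce.c:187 above */
}

/* On device removal, invalidate all of them: holebegin = holelen = 0
 * means "the whole file", even_cows = 1 drops private COW copies too. */
static void demo_kill_mappings(struct address_space *mapping)
{
	if (mapping)
		unmap_mapping_range(mapping, 0, 0, 1);
}
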
drivers/mtd/devices/block2mtd.c
108
struct address_space *mapping = dev->bdev_file->f_mapping;
drivers/mtd/devices/block2mtd.c
121
page = page_read(mapping, index);
drivers/mtd/devices/block2mtd.c
143
struct address_space *mapping = dev->bdev_file->f_mapping;
drivers/mtd/devices/block2mtd.c
155
page = page_read(mapping, index);
drivers/mtd/devices/block2mtd.c
164
balance_dirty_pages_ratelimited(mapping);
drivers/mtd/devices/block2mtd.c
50
static struct page *page_read(struct address_space *mapping, pgoff_t index)
drivers/mtd/devices/block2mtd.c
52
return read_mapping_page(mapping, index, NULL);
drivers/mtd/devices/block2mtd.c
58
struct address_space *mapping = dev->bdev_file->f_mapping;
drivers/mtd/devices/block2mtd.c
66
page = page_read(mapping, index);
drivers/mtd/devices/block2mtd.c
77
balance_dirty_pages_ratelimited(mapping);
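
block2mtd implements MTD reads and writes entirely through the block device's page cache: the page_read() helper above is read_mapping_page(), writers dirty the page in place, and balance_dirty_pages_ratelimited() keeps writeback throttled. A minimal sketch of the read side, assuming the memcpy_from_page() helper from linux/highmem.h; demo_read_page is an illustrative name.

#include <linux/highmem.h>
#include <linux/pagemap.h>

static int demo_read_page(struct address_space *mapping, pgoff_t index,
			  void *dst, size_t len)
{
	/* Finds or reads the page and waits until it is uptodate. */
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);

	memcpy_from_page(dst, page, 0, len);
	put_page(page);
	return 0;
}
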
drivers/mtd/nand/raw/nandsim.c
1314
struct address_space *mapping = file->f_mapping;
drivers/mtd/nand/raw/nandsim.c
1322
page = find_get_page(mapping, index);
drivers/mtd/nand/raw/nandsim.c
1324
page = find_or_create_page(mapping, index, GFP_NOFS);
drivers/mtd/nand/raw/nandsim.c
1326
write_inode_now(mapping->host, 1);
drivers/mtd/nand/raw/nandsim.c
1327
page = find_or_create_page(mapping, index, GFP_NOFS);
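
nandsim's file-backed flash uses the write-side twin of that pattern: try find_get_page() first, and only if the page is not resident create it with find_or_create_page() under GFP_NOFS, syncing the backing inode once if allocation fails (line 1326 above) before retrying. A sketch, with the unlock that find_or_create_page()'s locked return requires; demo_get_cache_page is an illustrative name.

#include <linux/pagemap.h>
#include <linux/writeback.h>

static struct page *demo_get_cache_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page)
		return page;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		/* Free memory by syncing the file, then retry once. */
		write_inode_now(mapping->host, 1);
		page = find_or_create_page(mapping, index, GFP_NOFS);
	}
	if (page)
		unlock_page(page);	/* returned locked on success */
	return page;
}
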
drivers/net/dsa/mv88e6xxx/chip.c
2309
enum mv88e6xxx_policy_mapping mapping = policy->mapping;
drivers/net/dsa/mv88e6xxx/chip.c
2320
switch (mapping) {
drivers/net/dsa/mv88e6xxx/chip.c
2347
policy->mapping == mapping &&
drivers/net/dsa/mv88e6xxx/chip.c
2351
return chip->info->ops->port_set_policy(chip, port, mapping, action);
drivers/net/dsa/mv88e6xxx/chip.c
2359
enum mv88e6xxx_policy_mapping mapping;
drivers/net/dsa/mv88e6xxx/chip.c
2379
mapping = MV88E6XXX_POLICY_MAPPING_DA;
drivers/net/dsa/mv88e6xxx/chip.c
2383
mapping = MV88E6XXX_POLICY_MAPPING_SA;
drivers/net/dsa/mv88e6xxx/chip.c
2401
if (policy->port == port && policy->mapping == mapping &&
drivers/net/dsa/mv88e6xxx/chip.c
2421
policy->mapping = mapping;
drivers/net/dsa/mv88e6xxx/chip.h
266
enum mv88e6xxx_policy_mapping mapping;
drivers/net/dsa/mv88e6xxx/chip.h
559
enum mv88e6xxx_policy_mapping mapping,
drivers/net/dsa/mv88e6xxx/port.c
1611
mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping,
drivers/net/dsa/mv88e6xxx/port.c
1615
switch (mapping) {
drivers/net/dsa/mv88e6xxx/port.c
1673
enum mv88e6xxx_policy_mapping mapping,
drivers/net/dsa/mv88e6xxx/port.c
1680
err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
drivers/net/dsa/mv88e6xxx/port.c
1696
enum mv88e6xxx_policy_mapping mapping,
drivers/net/dsa/mv88e6xxx/port.c
1705
err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask,
drivers/net/dsa/mv88e6xxx/port.h
545
enum mv88e6xxx_policy_mapping mapping,
drivers/net/dsa/mv88e6xxx/port.h
548
enum mv88e6xxx_policy_mapping mapping,
drivers/net/ethernet/adaptec/starfire.c
1135
np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
drivers/net/ethernet/adaptec/starfire.c
1139
if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
drivers/net/ethernet/adaptec/starfire.c
1145
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
drivers/net/ethernet/adaptec/starfire.c
1154
np->rx_info[i].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
1217
np->tx_info[entry].mapping =
drivers/net/ethernet/adaptec/starfire.c
1224
np->tx_info[entry].mapping =
drivers/net/ethernet/adaptec/starfire.c
1230
if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
drivers/net/ethernet/adaptec/starfire.c
1235
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
drivers/net/ethernet/adaptec/starfire.c
1274
np->tx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1276
np->tx_info[entry].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
1280
np->tx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1357
np->tx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1360
np->tx_info[entry].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
1367
np->tx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1462
np->rx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1466
np->rx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1471
np->rx_info[entry].mapping,
drivers/net/ethernet/adaptec/starfire.c
1476
np->rx_info[entry].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
1589
np->rx_info[entry].mapping =
drivers/net/ethernet/adaptec/starfire.c
1592
if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
drivers/net/ethernet/adaptec/starfire.c
1598
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
drivers/net/ethernet/adaptec/starfire.c
1966
np->rx_info[i].mapping,
drivers/net/ethernet/adaptec/starfire.c
1971
np->rx_info[i].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
1977
dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
drivers/net/ethernet/adaptec/starfire.c
1979
np->tx_info[i].mapping = 0;
drivers/net/ethernet/adaptec/starfire.c
506
dma_addr_t mapping;
drivers/net/ethernet/adaptec/starfire.c
510
dma_addr_t mapping;
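
The starfire rings above show the baseline streaming-DMA discipline that every NIC driver in this listing repeats: dma_map_single() the buffer, test dma_mapping_error() before the address ever reaches a descriptor, and keep the mapping around so it can be unmapped on completion. A minimal refill sketch with generic names (demo_refill_one is illustrative):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int demo_refill_one(struct device *dev, struct sk_buff *skb,
			   size_t buf_sz, dma_addr_t *out)
{
	dma_addr_t mapping = dma_map_single(dev, skb->data, buf_sz,
					    DMA_FROM_DEVICE);

	/* Never publish an unchecked address to the hardware ring. */
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	*out = mapping;
	return 0;
}
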
drivers/net/ethernet/alteon/acenic.c
1636
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
1642
mapping = dma_map_page(&ap->pdev->dev,
drivers/net/ethernet/alteon/acenic.c
1648
mapping, mapping);
drivers/net/ethernet/alteon/acenic.c
1651
set_aceaddr(&rd->addr, mapping);
drivers/net/ethernet/alteon/acenic.c
1697
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
1703
mapping = dma_map_page(&ap->pdev->dev,
drivers/net/ethernet/alteon/acenic.c
1709
mapping, mapping);
drivers/net/ethernet/alteon/acenic.c
1712
set_aceaddr(&rd->addr, mapping);
drivers/net/ethernet/alteon/acenic.c
1753
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
1759
mapping = dma_map_page(&ap->pdev->dev,
drivers/net/ethernet/alteon/acenic.c
1765
mapping, mapping);
drivers/net/ethernet/alteon/acenic.c
1768
set_aceaddr(&rd->addr, mapping);
drivers/net/ethernet/alteon/acenic.c
1973
dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
drivers/net/ethernet/alteon/acenic.c
2041
dma_unmap_addr(info, mapping),
drivers/net/ethernet/alteon/acenic.c
2333
dma_unmap_addr(info, mapping),
drivers/net/ethernet/alteon/acenic.c
2362
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
2365
mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
drivers/net/ethernet/alteon/acenic.c
2371
dma_unmap_addr_set(info, mapping, mapping);
drivers/net/ethernet/alteon/acenic.c
2373
return mapping;
drivers/net/ethernet/alteon/acenic.c
2416
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
2419
mapping = ace_map_tx_skb(ap, skb, skb, idx);
drivers/net/ethernet/alteon/acenic.c
2434
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
drivers/net/ethernet/alteon/acenic.c
2436
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
2440
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
drivers/net/ethernet/alteon/acenic.c
2449
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
drivers/net/ethernet/alteon/acenic.c
2460
mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
drivers/net/ethernet/alteon/acenic.c
2482
dma_unmap_addr_set(info, mapping, mapping);
drivers/net/ethernet/alteon/acenic.c
2484
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
drivers/net/ethernet/alteon/acenic.c
641
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
644
mapping = dma_unmap_addr(ringp, mapping);
drivers/net/ethernet/alteon/acenic.c
645
dma_unmap_page(&ap->pdev->dev, mapping,
drivers/net/ethernet/alteon/acenic.c
660
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
663
mapping = dma_unmap_addr(ringp, mapping);
drivers/net/ethernet/alteon/acenic.c
664
dma_unmap_page(&ap->pdev->dev, mapping,
drivers/net/ethernet/alteon/acenic.c
679
dma_addr_t mapping;
drivers/net/ethernet/alteon/acenic.c
682
mapping = dma_unmap_addr(ringp, mapping);
drivers/net/ethernet/alteon/acenic.c
683
dma_unmap_page(&ap->pdev->dev, mapping,
drivers/net/ethernet/alteon/acenic.h
594
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/alteon/acenic.h
605
DEFINE_DMA_UNMAP_ADDR(mapping);
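
acenic.h declares the saved address with DEFINE_DMA_UNMAP_ADDR() so the field, and all bookkeeping around it, compiles away on configurations where unmap needs no address; dma_unmap_addr_set() records it at map time and dma_unmap_addr() retrieves it at unmap time, exactly as in the .c lines above. A sketch with an illustrative demo_ struct:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct demo_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* zero-size when not needed */
};

static void demo_save(struct demo_ring_info *info, dma_addr_t addr)
{
	dma_unmap_addr_set(info, mapping, addr);	/* may be a no-op */
}

static void demo_unmap(struct device *dev, struct demo_ring_info *info,
		       size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(info, mapping), len,
			 DMA_TO_DEVICE);
}
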
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1758
dma_addr_t mapping;
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1799
mapping = dma_map_single(&pdev->dev, vir_addr,
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1801
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
1809
buffer_info->dma = mapping;
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
240
dma_addr_t mapping, valid;
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
273
mapping = dma_map_single(kdev, skb->data, size,
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
278
mapping = skb_frag_dma_map(kdev, frag, 0, size,
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
282
if (dma_mapping_error(kdev, mapping)) {
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
301
dma_unmap_addr_set(txcb, dma_addr, mapping);
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
314
desc->buf = mapping;
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
320
__func__, &mapping, desc->size, desc->flags,
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
417
dma_addr_t mapping;
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
422
mapping = dma_unmap_addr(txcb, dma_addr);
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
424
dma_unmap_single(kdev, mapping,
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
441
__func__, &mapping, desc->size, desc->flags,
drivers/net/ethernet/broadcom/b44.c
1004
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

drivers/net/ethernet/broadcom/b44.c
1079
dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
drivers/net/ethernet/broadcom/b44.c
1091
dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
drivers/net/ethernet/broadcom/b44.c
608
rp->mapping,
drivers/net/ethernet/broadcom/b44.c
639
dma_addr_t mapping;
drivers/net/ethernet/broadcom/b44.c
652
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
drivers/net/ethernet/broadcom/b44.c
658
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
drivers/net/ethernet/broadcom/b44.c
659
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c
661
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
drivers/net/ethernet/broadcom/b44.c
662
dma_unmap_single(bp->sdev->dma_dev, mapping,
drivers/net/ethernet/broadcom/b44.c
668
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
drivers/net/ethernet/broadcom/b44.c
671
if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
drivers/net/ethernet/broadcom/b44.c
672
mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c
673
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
drivers/net/ethernet/broadcom/b44.c
674
dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
drivers/net/ethernet/broadcom/b44.c
687
map->mapping = mapping;
drivers/net/ethernet/broadcom/b44.c
698
dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
drivers/net/ethernet/broadcom/b44.c
726
dest_map->mapping = src_map->mapping;
drivers/net/ethernet/broadcom/b44.c
749
dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
drivers/net/ethernet/broadcom/b44.c
767
dma_addr_t map = rp->mapping;
drivers/net/ethernet/broadcom/b44.c
952
dma_addr_t mapping;
drivers/net/ethernet/broadcom/b44.c
966
mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/b44.c
967
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c
971
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
drivers/net/ethernet/broadcom/b44.c
972
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
drivers/net/ethernet/broadcom/b44.c
979
mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
drivers/net/ethernet/broadcom/b44.c
981
if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
drivers/net/ethernet/broadcom/b44.c
982
if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
drivers/net/ethernet/broadcom/b44.c
983
dma_unmap_single(bp->sdev->dma_dev, mapping,
drivers/net/ethernet/broadcom/b44.c
996
bp->tx_buffers[entry].mapping = mapping;
drivers/net/ethernet/broadcom/b44.h
283
dma_addr_t mapping;
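
b44's DMA engine only reaches 30 bits, so every successful mapping is additionally range-checked against DMA_BIT_MASK(30); anything outside the window is unmapped again and the data bounced through a freshly allocated low buffer (lines 966-983 above). A sketch of just the validation step (demo_ name illustrative):

#include <linux/dma-mapping.h>

/* True when [mapping, mapping + len) fits the device's 1 GiB window. */
static bool demo_dma_in_window(struct device *dev, dma_addr_t mapping,
			       size_t len)
{
	if (dma_mapping_error(dev, mapping))
		return false;
	return mapping + len <= DMA_BIT_MASK(30);
}
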
drivers/net/ethernet/broadcom/bcmsysport.c
1294
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bcmsysport.c
1322
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/bcmsysport.c
1323
if (dma_mapping_error(kdev, mapping)) {
drivers/net/ethernet/broadcom/bcmsysport.c
1335
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
1338
addr_lo = lower_32_bits(mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
1339
len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
drivers/net/ethernet/broadcom/bcmsysport.c
667
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bcmsysport.c
678
mapping = dma_map_single(kdev, skb->data,
drivers/net/ethernet/broadcom/bcmsysport.c
680
if (dma_mapping_error(kdev, mapping)) {
drivers/net/ethernet/broadcom/bcmsysport.c
695
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/bcmsysport.c
696
dma_desc_set_addr(priv, cb->bd_addr, mapping);
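
bcmsysport descriptors carry a 32-bit low address word plus a few high bits folded into the length/status word (lines 1338-1339 above); lower_32_bits()/upper_32_bits() are the generic helpers for the split. A sketch with an illustrative descriptor layout; DEMO_ADDR_HI_MASK stands in for the driver's DESC_ADDR_HI_MASK.

#include <linux/kernel.h>

struct demo_desc {
	u32 addr_lo;
	u32 len_status;	/* low bits hold the high address bits */
};

#define DEMO_ADDR_HI_MASK	0xffU

static void demo_fill_desc(struct demo_desc *d, dma_addr_t mapping,
			   u32 len_status)
{
	d->addr_lo = lower_32_bits(mapping);
	d->len_status = (upper_32_bits(mapping) & DEMO_ADDR_HI_MASK) |
			len_status;
}
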
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1044
dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1061
*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1065
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1077
*mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1086
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1090
data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1096
rx_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1098
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1152
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1156
netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1169
rx_agg_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1170
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1212
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1217
data = __bnge_alloc_rx_frag(bn, &mapping, rxr,
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
1224
rxr->rx_tpa[i].mapping = mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
451
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
463
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
101
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
117
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
123
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
179
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
565
u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1070
dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1080
mapping),
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
111
prod_rx_buf->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
116
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1445
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1494
mapping = dma_map_single(bd->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1496
if (unlikely(dma_mapping_error(bd->dev, mapping)))
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1499
dma_unmap_addr_set(tx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1503
txbd->tx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1541
mapping = skb_frag_dma_map(bd->dev, frag, 0, len,
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1544
if (unlikely(dma_mapping_error(bd->dev, mapping)))
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1549
mapping, mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1551
txbd->tx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1594
dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
1604
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
347
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
374
mapping = tpa_info->mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
375
prod_rx_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
379
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
384
tpa_info->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
460
prod_rx_buf->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
477
unsigned int len, dma_addr_t mapping)
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
487
dma_sync_single_for_cpu(bd->dev, mapping, len, bn->rx_dir);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
492
dma_sync_single_for_device(bd->dev, mapping, len, bn->rx_dir);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
592
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
622
mapping = tpa_info->mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
633
skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
651
tpa_info->mapping = new_mapping;
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
654
dma_sync_single_for_cpu(bn->bd->dev, mapping,
drivers/net/ethernet/broadcom/bnge/bnge_txrx.c
920
dma_addr = rx_buf->mapping;
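
bnge (and bnxt further down) never map RX pages themselves: the page_pool mapped each page at allocation time, so the driver just reads the address back with page_pool_get_dma_addr() and adds its buffer offset (bnge_netdev.c:1061/1077 above). A sketch; the header path is the one used by recent kernels (older trees expose the same helpers via net/page_pool.h).

#include <linux/mm.h>
#include <net/page_pool/helpers.h>

/* The pool owns the DMA mapping; the driver only derives addresses. */
static void *demo_alloc_rx_frag(struct page_pool *pool, unsigned int offset,
				dma_addr_t *mapping)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;

	*mapping = page_pool_get_dma_addr(page) + offset;
	return page_address(page) + offset;
}
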
drivers/net/ethernet/broadcom/bnx2.c
2724
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2.c
2732
mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/broadcom/bnx2.c
2734
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
drivers/net/ethernet/broadcom/bnx2.c
2740
dma_unmap_addr_set(rx_pg, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2.c
2741
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
drivers/net/ethernet/broadcom/bnx2.c
2742
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/bnx2.c
2755
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
drivers/net/ethernet/broadcom/bnx2.c
2767
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2.c
2775
mapping = dma_map_single(&bp->pdev->dev,
drivers/net/ethernet/broadcom/bnx2.c
2779
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
drivers/net/ethernet/broadcom/bnx2.c
2785
dma_unmap_addr_set(rx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2.c
2787
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
drivers/net/ethernet/broadcom/bnx2.c
2788
rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/bnx2.c
2883
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
2896
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
2979
dma_unmap_addr_set(prod_rx_pg, mapping,
drivers/net/ethernet/broadcom/bnx2.c
2980
dma_unmap_addr(cons_rx_pg, mapping));
drivers/net/ethernet/broadcom/bnx2.c
3004
dma_unmap_addr(cons_rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
3014
dma_unmap_addr_set(prod_rx_buf, mapping,
drivers/net/ethernet/broadcom/bnx2.c
3015
dma_unmap_addr(cons_rx_buf, mapping));
drivers/net/ethernet/broadcom/bnx2.c
3093
mapping_old = dma_unmap_addr(rx_pg, mapping);
drivers/net/ethernet/broadcom/bnx2.c
3178
dma_addr = dma_unmap_addr(rx_buf, mapping);
drivers/net/ethernet/broadcom/bnx2.c
5435
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
5446
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
5477
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
5888
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
6576
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2.c
6647
mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
drivers/net/ethernet/broadcom/bnx2.c
6649
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
drivers/net/ethernet/broadcom/bnx2.c
6656
dma_unmap_addr_set(tx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2.c
6660
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
drivers/net/ethernet/broadcom/bnx2.c
6661
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/bnx2.c
6677
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
drivers/net/ethernet/broadcom/bnx2.c
6679
if (dma_mapping_error(&bp->pdev->dev, mapping))
drivers/net/ethernet/broadcom/bnx2.c
6681
dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
drivers/net/ethernet/broadcom/bnx2.c
6682
mapping);
drivers/net/ethernet/broadcom/bnx2.c
6684
txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
drivers/net/ethernet/broadcom/bnx2.c
6685
txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/bnx2.c
6728
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.c
6736
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2.h
6625
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnx2.h
6639
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnx2.h
6644
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
2034
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
359
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
373
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1027
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1059
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1385
dma_unmap_addr(first_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1441
dma_unmap_addr_set(first_buf, mapping, 0);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1562
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3311
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3325
mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3328
d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3329
d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3766
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3846
mapping = dma_map_single(&bp->pdev->dev, skb->data,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
3848
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4044
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4045
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4092
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4094
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4117
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4118
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
438
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
447
mapping = dma_map_single(&bp->pdev->dev,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
456
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
465
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
467
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
468
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
553
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
563
mapping = dma_map_page(&bp->pdev->dev, pool->page,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
565
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
573
dma_unmap_addr_set(sw_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
575
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
576
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
645
dma_unmap_addr(&old_rx_pg, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
792
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
846
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
852
mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
855
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
862
dma_unmap_addr_set(rx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
864
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
865
rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
813
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
895
dma_unmap_addr_set(prod_rx_buf, mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
896
dma_unmap_addr(cons_rx_buf, mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2505
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2565
mapping = dma_map_single(&bp->pdev->dev, skb->data,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2567
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2589
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2590
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
2670
dma_unmap_addr(rx_buf, mapping),
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
318
u32 addr, dma_addr_t mapping)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
320
REG_WR(bp, addr, U64_LO(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
321
REG_WR(bp, addr + 4, U64_HI(mapping));
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
325
dma_addr_t mapping, u16 abs_fid)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
330
__storm_memset_dma_mapping(bp, addr, mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5962
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5988
sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5989
sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
6003
sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
6004
sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
6041
dma_addr_t mapping = bp->def_status_blk_mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
6061
section = ((u64)mapping) + offsetof(struct host_sp_status_block,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
6101
section = ((u64)mapping) + offsetof(struct host_sp_status_block,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1335
BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1339
BP_VFDB(bp)->sp_dma.mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1343
BP_VF_MBX_DMA(bp)->mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1347
BP_VF_BULLETIN_DMA(bp)->mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1368
cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1373
cxt->mapping = 0;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1380
BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1388
BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1397
BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1597
BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1654
ilt->lines[line+i].page_mapping = hw_cxt->mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
313
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
349
#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
2292
dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1012
prod_rx_buf->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1038
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1041
netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1054
rx_agg_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1055
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1124
prod_rx_buf->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1128
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1392
dma_addr_t mapping)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1402
dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
14040
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
14048
&mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
14054
req->host_dest_addr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1408
dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1418
dma_addr_t mapping)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1420
return bnxt_copy_data(bnapi, data, len, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1426
dma_addr_t mapping)
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1436
skb = bnxt_copy_data(bnapi, data, len, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1546
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1578
mapping = tpa_info->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1579
prod_rx_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1583
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1588
tpa_info->mapping = cons_rx_buf->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1857
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1904
mapping = tpa_info->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1915
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1935
tpa_info->mapping = new_mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
1938
dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
2231
dma_addr = rx_buf->mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3421
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3446
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
3459
mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4042
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4055
mapping = txr->tx_push_mapping +
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4057
txr->data_mapping = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4452
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4457
data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
4464
rxr->rx_tpa[i].mapping = mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
476
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
653
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
655
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
drivers/net/ethernet/broadcom/bnxt/bnxt.c
658
dma_unmap_addr_set(tx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
662
txbd->tx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
719
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
722
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
drivers/net/ethernet/broadcom/bnxt/bnxt.c
727
mapping, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
729
txbd->tx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
776
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
786
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
852
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
862
mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt.c
908
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
925
*mapping = page_pool_get_dma_addr(page) + *offset;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
929
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
946
*mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
950
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
drivers/net/ethernet/broadcom/bnxt/bnxt.c
962
*mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
971
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
976
__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
981
mapping += bp->rx_dma_offset;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
985
u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
drivers/net/ethernet/broadcom/bnxt/bnxt.c
993
rx_buf->mapping = mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.c
995
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1056
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.h
888
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt.h
904
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt.h
910
dma_addr_t mapping;
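
Across bnxt and bnge the hand-off to hardware is the same two-step visible throughout the lines above: keep a software copy of the mapping in the ring's buffer struct (for later sync and unmap), then publish it to the buffer descriptor as a little-endian 64-bit address with cpu_to_le64(). A sketch with illustrative demo_ types:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rx_bd {
	__le64 rx_bd_haddr;		/* hardware-visible, little-endian */
};

struct demo_sw_rx_buf {
	void *data;
	dma_addr_t mapping;		/* software copy for sync/unmap */
};

static void demo_publish(struct demo_rx_bd *rxbd,
			 struct demo_sw_rx_buf *rx_buf,
			 void *data, dma_addr_t mapping)
{
	rx_buf->data = data;
	rx_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
}
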
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
328
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
344
data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
350
get->dest_data_addr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
404
set->src_data_addr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
444
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
454
dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
460
req->src_data_addr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
101
tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
109
dma_addr_t mapping, u32 len,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
114
tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
117
dma_unmap_addr_set(tx_buf, mapping, mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
142
dma_unmap_addr(tx_buf, mapping),
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
189
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
196
mapping = rx_buf->mapping - bp->rx_dma_offset;
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
197
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
232
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
270
mapping = rx_buf->mapping - bp->rx_dma_offset;
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
28
dma_addr_t mapping, u32 len,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
285
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
290
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
337
dma_addr_t mapping;
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
362
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
365
if (dma_mapping_error(&pdev->dev, mapping))
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
368
__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
56
txbd->tx_bd_haddr = cpu_to_le64(mapping);
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
96
dma_addr_t mapping, u32 len, u16 rx_prod,
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
17
dma_addr_t mapping, u32 len,
drivers/net/ethernet/broadcom/cnic.c
1227
&cp->ctx_arr[i].mapping,
drivers/net/ethernet/broadcom/cnic.c
1233
if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
drivers/net/ethernet/broadcom/cnic.c
1674
if (cp->ctx_arr[blk].mapping & mask)
drivers/net/ethernet/broadcom/cnic.c
1676
(cp->ctx_arr[blk].mapping & mask);
drivers/net/ethernet/broadcom/cnic.c
1678
ctx_map = cp->ctx_arr[blk].mapping + align_off +
drivers/net/ethernet/broadcom/cnic.c
4401
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
drivers/net/ethernet/broadcom/cnic.c
4403
(u64) cp->ctx_arr[i].mapping >> 32);
drivers/net/ethernet/broadcom/cnic.c
4855
dma_addr_t map = ctx->mapping;
drivers/net/ethernet/broadcom/cnic.c
835
cp->ctx_arr[i].mapping);
drivers/net/ethernet/broadcom/cnic.c
950
&cp->ctx_arr[i].mapping,
drivers/net/ethernet/broadcom/cnic.h
126
dma_addr_t mapping;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2123
dma_addr_t mapping;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2172
mapping = dma_map_single(kdev, skb->data, size,
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2178
mapping = skb_frag_dma_map(kdev, frag, 0, size,
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2182
ret = dma_mapping_error(kdev, mapping);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2189
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2210
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2257
dma_addr_t mapping;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2270
mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2272
if (dma_mapping_error(kdev, mapping)) {
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2285
dma_unmap_addr_set(cb, dma_addr, mapping);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
2287
dmadesc_set_addr(priv, cb->bd_addr, mapping);
drivers/net/ethernet/broadcom/tg3.c
13594
dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
drivers/net/ethernet/broadcom/tg3.c
13677
mapping);
drivers/net/ethernet/broadcom/tg3.c
13681
mapping);
drivers/net/ethernet/broadcom/tg3.c
6591
dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
drivers/net/ethernet/broadcom/tg3.c
6610
dma_unmap_addr(ri, mapping),
drivers/net/ethernet/broadcom/tg3.c
6674
dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
drivers/net/ethernet/broadcom/tg3.c
6699
dma_addr_t mapping;
drivers/net/ethernet/broadcom/tg3.c
6739
mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
drivers/net/ethernet/broadcom/tg3.c
6741
if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
drivers/net/ethernet/broadcom/tg3.c
6747
dma_unmap_addr_set(map, mapping, mapping);
drivers/net/ethernet/broadcom/tg3.c
6749
desc->addr_hi = ((u64)mapping >> 32);
drivers/net/ethernet/broadcom/tg3.c
6750
desc->addr_lo = ((u64)mapping & 0xffffffff);
drivers/net/ethernet/broadcom/tg3.c
6792
dma_unmap_addr_set(dest_map, mapping,
drivers/net/ethernet/broadcom/tg3.c
6793
dma_unmap_addr(src_map, mapping));
drivers/net/ethernet/broadcom/tg3.c
6863
dma_addr = dma_unmap_addr(ri, mapping);
drivers/net/ethernet/broadcom/tg3.c
6869
dma_addr = dma_unmap_addr(ri, mapping);
drivers/net/ethernet/broadcom/tg3.c
7711
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
drivers/net/ethernet/broadcom/tg3.c
7713
u32 base = (u32) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/tg3.c
7721
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
drivers/net/ethernet/broadcom/tg3.c
7725
u32 base = (u32) mapping & 0xffffffff;
drivers/net/ethernet/broadcom/tg3.c
7733
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
drivers/net/ethernet/broadcom/tg3.c
7738
return ((u64) mapping + len) > DMA_BIT_MASK(40);
drivers/net/ethernet/broadcom/tg3.c
7746
dma_addr_t mapping, u32 len, u32 flags,
drivers/net/ethernet/broadcom/tg3.c
7749
txbd->addr_hi = ((u64) mapping >> 32);
drivers/net/ethernet/broadcom/tg3.c
7750
txbd->addr_lo = ((u64) mapping & 0xffffffff);
drivers/net/ethernet/broadcom/tg3.c
7827
dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
drivers/net/ethernet/broadcom/tg3.c
7843
dma_unmap_addr(txb, mapping),
drivers/net/ethernet/broadcom/tg3.c
7892
mapping, new_addr);
drivers/net/ethernet/broadcom/tg3.c
7969
dma_addr_t mapping;
drivers/net/ethernet/broadcom/tg3.c
8113
mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
drivers/net/ethernet/broadcom/tg3.c
8115
if (dma_mapping_error(&tp->pdev->dev, mapping))
drivers/net/ethernet/broadcom/tg3.c
8120
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
drivers/net/ethernet/broadcom/tg3.c
8127
if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
drivers/net/ethernet/broadcom/tg3.c
8147
mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
drivers/net/ethernet/broadcom/tg3.c
8151
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
drivers/net/ethernet/broadcom/tg3.c
8152
mapping);
drivers/net/ethernet/broadcom/tg3.c
8153
if (dma_mapping_error(&tp->pdev->dev, mapping))
drivers/net/ethernet/broadcom/tg3.c
8157
tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
drivers/net/ethernet/broadcom/tg3.c
9505
dma_addr_t mapping, u32 maxlen_flags,
drivers/net/ethernet/broadcom/tg3.c
9510
((u64) mapping >> 32));
drivers/net/ethernet/broadcom/tg3.c
9513
((u64) mapping & 0xffffffff));
drivers/net/ethernet/broadcom/tg3.c
9754
u64 mapping = (u64)tnapi->status_mapping;
drivers/net/ethernet/broadcom/tg3.c
9755
tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
drivers/net/ethernet/broadcom/tg3.c
9756
tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
drivers/net/ethernet/broadcom/tg3.h
2864
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/ethernet/broadcom/tg3.h
2869
DEFINE_DMA_UNMAP_ADDR(mapping);
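
tg3 hardware has errata for DMA buffers that cross a 4 GB boundary or extend past 40 bits, hence tg3_4g_overflow_test() and tg3_40bit_overflow_test() above; offenders are re-queued through a bounce buffer. A reconstruction of the core checks; the real 4 GB test carries extra chip-specific slack that this sketch omits.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* A buffer crosses a 4 GB boundary iff adding its length wraps the
 * low 32 bits of the bus address. */
static bool demo_crosses_4g(dma_addr_t mapping, unsigned int len)
{
	u32 base = lower_32_bits(mapping);

	return base + len < base;
}

/* A 40-bit DMA engine cannot reach past DMA_BIT_MASK(40). */
static bool demo_beyond_40bit(dma_addr_t mapping, unsigned int len)
{
	return (u64)mapping + len > DMA_BIT_MASK(40);
}
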
drivers/net/ethernet/cadence/macb.h
970
dma_addr_t mapping;
drivers/net/ethernet/cadence/macb_main.c
1063
if (tx_skb->mapping) {
drivers/net/ethernet/cadence/macb_main.c
1065
dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
drivers/net/ethernet/cadence/macb_main.c
1068
dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
drivers/net/ethernet/cadence/macb_main.c
1070
tx_skb->mapping = 0;
drivers/net/ethernet/cadence/macb_main.c
2094
dma_addr_t mapping;
drivers/net/ethernet/cadence/macb_main.c
2116
mapping = dma_map_single(&bp->pdev->dev,
drivers/net/ethernet/cadence/macb_main.c
2119
if (dma_mapping_error(&bp->pdev->dev, mapping))
drivers/net/ethernet/cadence/macb_main.c
2124
tx_skb->mapping = mapping;
drivers/net/ethernet/cadence/macb_main.c
2145
mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
drivers/net/ethernet/cadence/macb_main.c
2147
if (dma_mapping_error(&bp->pdev->dev, mapping))
drivers/net/ethernet/cadence/macb_main.c
2152
tx_skb->mapping = mapping;
drivers/net/ethernet/cadence/macb_main.c
2226
macb_set_addr(bp, desc, tx_skb->mapping);
drivers/net/ethernet/cadence/macb_main.c
4937
lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
drivers/net/ethernet/cadence/macb_main.c
4939
if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
drivers/net/ethernet/cadence/macb_main.c
4947
macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
drivers/net/ethernet/cadence/macb_main.c
5031
dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
drivers/net/ethernet/chelsio/cxgb/sge.c
1142
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
drivers/net/ethernet/chelsio/cxgb/sge.c
1148
e->addr_lo = (u32)mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
1149
e->addr_hi = (u64)mapping >> 32;
drivers/net/ethernet/chelsio/cxgb/sge.c
1205
dma_addr_t mapping, desc_mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
1214
mapping = dma_map_single(&adapter->pdev->dev, skb->data,
drivers/net/ethernet/chelsio/cxgb/sge.c
1217
desc_mapping = mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
1253
dma_unmap_addr_set(ce, dma_addr, mapping);
drivers/net/ethernet/chelsio/cxgb/sge.c
1267
mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
drivers/net/ethernet/chelsio/cxgb/sge.c
1269
desc_mapping = mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
1279
dma_unmap_addr_set(ce, dma_addr, mapping);
drivers/net/ethernet/chelsio/cxgb/sge.c
830
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
837
mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
drivers/net/ethernet/chelsio/cxgb/sge.c
842
dma_unmap_addr_set(ce, dma_addr, mapping);
drivers/net/ethernet/chelsio/cxgb/sge.c
844
e->addr_lo = (u32)mapping;
drivers/net/ethernet/chelsio/cxgb/sge.c
845
e->addr_hi = (u64)mapping >> 32;
drivers/net/ethernet/chelsio/cxgb3/adapter.h
96
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2158
dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
drivers/net/ethernet/chelsio/cxgb3/sge.c
355
dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
drivers/net/ethernet/chelsio/cxgb3/sge.c
411
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
413
mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
drivers/net/ethernet/chelsio/cxgb3/sge.c
414
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
drivers/net/ethernet/chelsio/cxgb3/sge.c
417
dma_unmap_addr_set(sd, dma_addr, mapping);
drivers/net/ethernet/chelsio/cxgb3/sge.c
419
d->addr_lo = cpu_to_be32(mapping);
drivers/net/ethernet/chelsio/cxgb3/sge.c
420
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
drivers/net/ethernet/chelsio/cxgb3/sge.c
427
static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
drivers/net/ethernet/chelsio/cxgb3/sge.c
430
d->addr_lo = cpu_to_be32(mapping);
drivers/net/ethernet/chelsio/cxgb3/sge.c
431
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
drivers/net/ethernet/chelsio/cxgb3/sge.c
443
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
452
mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
drivers/net/ethernet/chelsio/cxgb3/sge.c
454
if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
459
q->pg_chunk.mapping = mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
508
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
517
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
drivers/net/ethernet/chelsio/cxgb3/sge.c
518
dma_unmap_addr_set(sd, dma_addr, mapping);
drivers/net/ethernet/chelsio/cxgb3/sge.c
520
add_one_rx_chunk(mapping, d, q->gen);
drivers/net/ethernet/chelsio/cxgb3/sge.c
521
dma_sync_single_for_device(&adap->pdev->dev, mapping,
drivers/net/ethernet/chelsio/cxgb3/sge.c
885
dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
drivers/net/ethernet/chelsio/cxgb4/sge.c
515
dma_addr_t mapping)
drivers/net/ethernet/chelsio/cxgb4/sge.c
518
sd->dma_addr = mapping; /* includes size low bits */
drivers/net/ethernet/chelsio/cxgb4/sge.c
540
dma_addr_t mapping;
drivers/net/ethernet/chelsio/cxgb4/sge.c
567
mapping = dma_map_page(adap->pdev_dev, pg, 0,
drivers/net/ethernet/chelsio/cxgb4/sge.c
570
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
575
mapping |= RX_LARGE_PG_BUF;
drivers/net/ethernet/chelsio/cxgb4/sge.c
576
*d++ = cpu_to_be64(mapping);
drivers/net/ethernet/chelsio/cxgb4/sge.c
578
set_rx_sw_desc(sd, pg, mapping);
drivers/net/ethernet/chelsio/cxgb4/sge.c
598
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
drivers/net/ethernet/chelsio/cxgb4/sge.c
600
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
605
*d++ = cpu_to_be64(mapping);
drivers/net/ethernet/chelsio/cxgb4/sge.c
607
set_rx_sw_desc(sd, pg, mapping);
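
The cxgb4 lines "mapping |= RX_LARGE_PG_BUF" and "*d++ = cpu_to_be64(mapping)" rely on page alignment: dma_map_page() over a whole page returns an address with zeroed low bits, so the driver can tag the buffer's size class in bit 0 before writing the descriptor and mask it off again before unmapping. A sketch under that alignment assumption (the flag and helper names are invented):

#include <linux/bits.h>
#include <linux/dma-mapping.h>

#define RX_BUF_SIZE_BIT  BIT(0)                          /* invented name */
#define RX_BUF_ADDR_MASK (~(dma_addr_t)RX_BUF_SIZE_BIT)

/* encode: the page-aligned handle has bit 0 free, so borrow it */
static __be64 rx_addr_encode(dma_addr_t mapping, bool large)
{
        if (large)
                mapping |= RX_BUF_SIZE_BIT;
        return cpu_to_be64(mapping);
}

/* decode: recover the real handle before dma_unmap_page() */
static dma_addr_t rx_addr_decode(dma_addr_t stored)
{
        return stored & RX_BUF_ADDR_MASK;
}
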
drivers/net/ethernet/cortina/gemini.c
1007
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
1009
mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
1010
dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
drivers/net/ethernet/cortina/gemini.c
1145
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
1239
mapping = dma_map_single(geth->dev, buffer, buflen,
drivers/net/ethernet/cortina/gemini.c
1241
if (dma_mapping_error(geth->dev, mapping))
drivers/net/ethernet/cortina/gemini.c
1247
txd->word2.buf_adr = mapping;
drivers/net/ethernet/cortina/gemini.c
1457
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
1474
mapping = rx->word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
1482
page_offs = mapping & ~PAGE_MASK;
drivers/net/ethernet/cortina/gemini.c
1484
if (!mapping) {
drivers/net/ethernet/cortina/gemini.c
1491
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
drivers/net/ethernet/cortina/gemini.c
1546
if (mapping)
drivers/net/ethernet/cortina/gemini.c
615
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
624
mapping = txd->word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
627
dma_unmap_single(geth->dev, mapping,
drivers/net/ethernet/cortina/gemini.c
728
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
732
mapping = addr & PAGE_MASK;
drivers/net/ethernet/cortina/gemini.c
742
if (gpage->mapping == mapping)
drivers/net/ethernet/cortina/gemini.c
758
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
778
mapping = rxd[r].word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
782
if (!mapping)
drivers/net/ethernet/cortina/gemini.c
786
gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE);
drivers/net/ethernet/cortina/gemini.c
806
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
815
mapping = dma_map_single(geth->dev, page_address(page),
drivers/net/ethernet/cortina/gemini.c
817
if (dma_mapping_error(geth->dev, mapping)) {
drivers/net/ethernet/cortina/gemini.c
834
freeq_entry->word2.buf_adr = mapping;
drivers/net/ethernet/cortina/gemini.c
836
mapping += frag_len;
drivers/net/ethernet/cortina/gemini.c
842
mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
843
dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
drivers/net/ethernet/cortina/gemini.c
852
pn, (unsigned int)mapping, page);
drivers/net/ethernet/cortina/gemini.c
853
gpage->mapping = mapping;
drivers/net/ethernet/cortina/gemini.c
93
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
971
dma_addr_t mapping;
drivers/net/ethernet/cortina/gemini.c
974
mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr;
drivers/net/ethernet/cortina/gemini.c
975
dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE);
drivers/net/ethernet/dec/tulip/de2104x.c
1287
de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
drivers/net/ethernet/dec/tulip/de2104x.c
1299
de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
drivers/net/ethernet/dec/tulip/de2104x.c
1345
de->rx_skb[i].mapping, de->rx_buf_sz,
drivers/net/ethernet/dec/tulip/de2104x.c
1357
de->tx_skb[i].mapping,
drivers/net/ethernet/dec/tulip/de2104x.c
1362
de->tx_skb[i].mapping,
drivers/net/ethernet/dec/tulip/de2104x.c
288
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/de2104x.c
403
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/de2104x.c
418
mapping = de->rx_skb[rx_tail].mapping;
drivers/net/ethernet/dec/tulip/de2104x.c
446
dma_unmap_single(&de->pdev->dev, mapping, buflen,
drivers/net/ethernet/dec/tulip/de2104x.c
450
mapping =
drivers/net/ethernet/dec/tulip/de2104x.c
451
de->rx_skb[rx_tail].mapping =
drivers/net/ethernet/dec/tulip/de2104x.c
456
dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
drivers/net/ethernet/dec/tulip/de2104x.c
461
dma_sync_single_for_device(&de->pdev->dev, mapping,
drivers/net/ethernet/dec/tulip/de2104x.c
482
de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/de2104x.c
560
de->tx_skb[tx_tail].mapping,
drivers/net/ethernet/dec/tulip/de2104x.c
566
dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
drivers/net/ethernet/dec/tulip/de2104x.c
609
u32 mapping, len, flags = FirstFrag | LastFrag;
drivers/net/ethernet/dec/tulip/de2104x.c
627
mapping = dma_map_single(&de->pdev->dev, skb->data, len,
drivers/net/ethernet/dec/tulip/de2104x.c
635
txd->addr1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/de2104x.c
638
de->tx_skb[entry].mapping = mapping;
drivers/net/ethernet/dec/tulip/de2104x.c
725
u32 mapping;
drivers/net/ethernet/dec/tulip/de2104x.c
770
de->tx_skb[entry].mapping = mapping =
drivers/net/ethernet/dec/tulip/de2104x.c
780
txd->addr1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/interrupt.c
214
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
227
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
235
if (tp->rx_buffers[entry].mapping !=
drivers/net/ethernet/dec/tulip/interrupt.c
240
(unsigned long long)tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
246
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
251
tp->rx_buffers[entry].mapping = 0;
drivers/net/ethernet/dec/tulip/interrupt.c
444
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
457
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
465
if (tp->rx_buffers[entry].mapping !=
drivers/net/ethernet/dec/tulip/interrupt.c
470
(long long)tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
476
tp->rx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
480
tp->rx_buffers[entry].mapping = 0;
drivers/net/ethernet/dec/tulip/interrupt.c
606
if (tp->tx_buffers[entry].mapping)
drivers/net/ethernet/dec/tulip/interrupt.c
608
tp->tx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
640
tp->tx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/interrupt.c
647
tp->tx_buffers[entry].mapping = 0;
drivers/net/ethernet/dec/tulip/interrupt.c
70
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/interrupt.c
77
mapping = dma_map_single(&tp->pdev->dev, skb->data,
drivers/net/ethernet/dec/tulip/interrupt.c
79
if (dma_mapping_error(&tp->pdev->dev, mapping)) {
drivers/net/ethernet/dec/tulip/interrupt.c
85
tp->rx_buffers[entry].mapping = mapping;
drivers/net/ethernet/dec/tulip/interrupt.c
87
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/tulip.h
397
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
1142
tp->tx_buffers[entry].mapping = 0;
drivers/net/ethernet/dec/tulip/tulip_core.c
1153
tp->tx_buffers[entry].mapping =
drivers/net/ethernet/dec/tulip/tulip_core.c
1163
cpu_to_le32(tp->tx_buffers[entry].mapping);
drivers/net/ethernet/dec/tulip/tulip_core.c
344
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
353
mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
drivers/net/ethernet/dec/tulip/tulip_core.c
357
tp->tx_buffers[tp->cur_tx].mapping = mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
361
tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/tulip_core.c
617
tp->rx_buffers[i].mapping = 0;
drivers/net/ethernet/dec/tulip/tulip_core.c
624
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
633
mapping = dma_map_single(&tp->pdev->dev, skb->data,
drivers/net/ethernet/dec/tulip/tulip_core.c
635
tp->rx_buffers[i].mapping = mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
637
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/tulip_core.c
645
tp->tx_buffers[i].mapping = 0;
drivers/net/ethernet/dec/tulip/tulip_core.c
658
dma_addr_t mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
667
mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
drivers/net/ethernet/dec/tulip/tulip_core.c
669
tp->tx_buffers[entry].mapping = mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
670
tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
drivers/net/ethernet/dec/tulip/tulip_core.c
718
if (tp->tx_buffers[entry].mapping)
drivers/net/ethernet/dec/tulip/tulip_core.c
720
tp->tx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/tulip_core.c
727
tp->tx_buffers[entry].mapping,
drivers/net/ethernet/dec/tulip/tulip_core.c
734
tp->tx_buffers[entry].mapping = 0;
drivers/net/ethernet/dec/tulip/tulip_core.c
789
dma_addr_t mapping = tp->rx_buffers[i].mapping;
drivers/net/ethernet/dec/tulip/tulip_core.c
792
tp->rx_buffers[i].mapping = 0;
drivers/net/ethernet/dec/tulip/tulip_core.c
799
dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
drivers/net/ethernet/dec/tulip/tulip_core.c
810
tp->tx_buffers[i].mapping, skb->len,
drivers/net/ethernet/dec/tulip/tulip_core.c
815
tp->tx_buffers[i].mapping = 0;
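
The tulip-family hits store the handle twice: once in host bookkeeping (rx_buffers[i].mapping) for the eventual dma_unmap_single(), and once in the hardware descriptor as a little-endian 32-bit bus address via cpu_to_le32(). A sketch of that refill step, assuming the device has been limited to 32-bit DMA with dma_set_mask() so the truncation is safe; the descriptor layout is illustrative:

#include <linux/dma-mapping.h>

struct tulip_like_desc {
        __le32 status;
        __le32 length;
        __le32 buffer1;         /* 32-bit bus address of the data buffer */
        __le32 buffer2;
};

static int rx_refill(struct device *dev, struct tulip_like_desc *desc,
                     dma_addr_t *stash, void *buf, size_t len)
{
        dma_addr_t mapping = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, mapping))
                return -ENOMEM;

        *stash = mapping;                       /* kept for dma_unmap_single() */
        desc->buffer1 = cpu_to_le32(mapping);   /* hardware reads little-endian */
        return 0;
}
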
drivers/net/ethernet/jme.c
1005
dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len,
drivers/net/ethernet/jme.c
1009
dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping,
drivers/net/ethernet/jme.c
1450
ttxbi->mapping, ttxbi->len,
drivers/net/ethernet/jme.c
1453
ttxbi->mapping = 0;
drivers/net/ethernet/jme.c
1978
txbi->mapping = dmaaddr;
drivers/net/ethernet/jme.c
1992
dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len,
drivers/net/ethernet/jme.c
1995
ctxbi->mapping = 0;
drivers/net/ethernet/jme.c
617
txbi->mapping = 0;
drivers/net/ethernet/jme.c
715
rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
drivers/net/ethernet/jme.c
717
(__u64)rxbi->mapping & 0xFFFFFFFFUL);
drivers/net/ethernet/jme.c
731
dma_addr_t mapping;
drivers/net/ethernet/jme.c
738
mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
drivers/net/ethernet/jme.c
741
if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
drivers/net/ethernet/jme.c
746
if (likely(rxbi->mapping))
drivers/net/ethernet/jme.c
747
dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
drivers/net/ethernet/jme.c
752
rxbi->mapping = mapping;
drivers/net/ethernet/jme.c
764
dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
drivers/net/ethernet/jme.c
768
rxbi->mapping = 0;
drivers/net/ethernet/jme.h
354
dma_addr_t mapping;
drivers/net/ethernet/lantiq_xrx200.c
191
dma_addr_t mapping;
drivers/net/ethernet/lantiq_xrx200.c
201
mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
drivers/net/ethernet/lantiq_xrx200.c
203
if (unlikely(dma_mapping_error(priv->dev, mapping))) {
drivers/net/ethernet/lantiq_xrx200.c
210
ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
drivers/net/ethernet/lantiq_xrx200.c
364
dma_addr_t mapping;
drivers/net/ethernet/lantiq_xrx200.c
383
mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/lantiq_xrx200.c
384
if (unlikely(dma_mapping_error(priv->dev, mapping)))
drivers/net/ethernet/lantiq_xrx200.c
388
byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);
drivers/net/ethernet/lantiq_xrx200.c
390
desc->addr = mapping - byte_offset;
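
lantiq_xrx200 shows a burst-alignment quirk: the engine wants the programmed address aligned to its burst size, so the driver computes byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4), programs mapping - byte_offset, and carries the offset in the descriptor control word. A sketch of the same arithmetic; the descriptor layout, mask positions, and burst width below are assumptions, not the real register format:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define BURST_BYTES      32                     /* assumed burst width */
#define DESC_OFFSET_MASK GENMASK(27, 23)        /* assumed field position */
#define DESC_LEN_MASK    GENMASK(15, 0)

struct burst_desc {                             /* invented layout */
        u32 ctl;
        u32 addr;
};

static void tx_program(struct burst_desc *desc, dma_addr_t mapping, size_t len)
{
        unsigned int byte_offset = mapping % BURST_BYTES;

        /* aligned start goes in the address word, the skew in the control word */
        desc->addr = mapping - byte_offset;
        desc->ctl  = FIELD_PREP(DESC_OFFSET_MASK, byte_offset) |
                     FIELD_PREP(DESC_LEN_MASK, len);
}
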
drivers/net/ethernet/marvell/sky2.c
1831
dma_addr_t mapping;
drivers/net/ethernet/marvell/sky2.c
1841
mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
drivers/net/ethernet/marvell/sky2.c
1844
if (dma_mapping_error(&hw->pdev->dev, mapping))
drivers/net/ethernet/marvell/sky2.c
1852
upper = upper_32_bits(mapping);
drivers/net/ethernet/marvell/sky2.c
1923
dma_unmap_addr_set(re, mapaddr, mapping);
drivers/net/ethernet/marvell/sky2.c
1927
le->addr = cpu_to_le32(lower_32_bits(mapping));
drivers/net/ethernet/marvell/sky2.c
1936
mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
drivers/net/ethernet/marvell/sky2.c
1939
if (dma_mapping_error(&hw->pdev->dev, mapping))
drivers/net/ethernet/marvell/sky2.c
1942
upper = upper_32_bits(mapping);
drivers/net/ethernet/marvell/sky2.c
1952
dma_unmap_addr_set(re, mapaddr, mapping);
drivers/net/ethernet/marvell/sky2.c
1956
le->addr = cpu_to_le32(lower_32_bits(mapping));
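
sky2 splits the 64-bit handle across 32-bit descriptor words with upper_32_bits()/lower_32_bits(), and only emits a fresh high word when it differs from the last one programmed, saving a list element per packet in the common case. A sketch of both pieces (real drivers keep the cached high word per ring, not in a loose variable):

#include <linux/kernel.h>

/* split one 64-bit handle across two 32-bit LE descriptor words */
static void desc_write_addr(__le32 *hi, __le32 *lo, dma_addr_t mapping)
{
        *hi = cpu_to_le32(upper_32_bits(mapping));
        *lo = cpu_to_le32(lower_32_bits(mapping));
}

/* emit a high word only when it changed since the last descriptor */
static bool high_word_changed(u32 *cached_upper, dma_addr_t mapping)
{
        u32 upper = upper_32_bits(mapping);

        if (upper == *cached_upper)
                return false;
        *cached_upper = upper;
        return true;
}
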
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
15
u32 mapping;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
161
u32 mapping;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
185
err = mapping_add(ctx, &mapped_obj, &mapping);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
192
int_port->mapping = mapping;
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
214
mapping_remove(ctx, int_port->mapping);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
243
mapping_remove(ctx, int_port->mapping);
drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
91
flow_context->flow_tag = int_port->mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5271
tc->mapping = chains_mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5283
attr.mapping = chains_mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5368
mapping_destroy(tc->mapping);
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5400
struct mapping_ctx *mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5424
mapping = mapping_create_for_id(mapping_id, id_len, MAPPING_TYPE_TUNNEL,
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5428
if (IS_ERR(mapping)) {
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5429
err = PTR_ERR(mapping);
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5432
uplink_priv->tunnel_mapping = mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5437
mapping = mapping_create_for_id(mapping_id, id_len,
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5441
if (IS_ERR(mapping)) {
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5442
err = PTR_ERR(mapping);
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5445
uplink_priv->tunnel_enc_opts_mapping = mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5820
mapping_ctx = tc->mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5838
ctx = tc->mapping;
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
99
struct mapping_ctx *mapping;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1577
attr.mapping = esw->offloads.reg_c0_obj_pool;
drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
158
DEFINE_SHOW_ATTRIBUTE(mapping);
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
739
chains->chains_mapping = attr->mapping;
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
25
struct mapping_ctx *mapping;
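
In the mlx5 hits, "mapping" is not DMA at all: mapping_create_for_id()/mapping_add()/mapping_remove() belong to a driver-internal context that hands out compact u32 IDs for metadata objects, which are then carried in hardware flow tags. As a rough stand-in for the idea, the same ID-to-object service can be sketched with a plain allocating xarray; the function names here are invented and this is not the mlx5 API:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(obj_ids);          /* IDs start at 1, 0 stays invalid */

/* hand out a compact ID for @obj, in the spirit of mapping_add() */
static int obj_id_add(void *obj, u32 *id)
{
        return xa_alloc(&obj_ids, id, obj, XA_LIMIT(1, U16_MAX), GFP_KERNEL);
}

/* counterpart of mapping_remove() */
static void obj_id_remove(u32 id)
{
        xa_erase(&obj_ids, id);
}
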
drivers/net/ethernet/mellanox/mlxsw/core.c
2150
alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
drivers/net/ethernet/mellanox/mlxsw/core.c
2152
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
drivers/net/ethernet/mellanox/mlxsw/core.c
2153
if (!mlxsw_core->lag.mapping) {
drivers/net/ethernet/mellanox/mlxsw/core.c
2227
kfree(mlxsw_core->lag.mapping);
drivers/net/ethernet/mellanox/mlxsw/core.c
2300
kfree(mlxsw_core->lag.mapping);
drivers/net/ethernet/mellanox/mlxsw/core.c
3010
mlxsw_core->lag.mapping[index] = local_port;
drivers/net/ethernet/mellanox/mlxsw/core.c
3020
return mlxsw_core->lag.mapping[index];
drivers/net/ethernet/mellanox/mlxsw/core.c
3033
if (mlxsw_core->lag.mapping[index] == local_port)
drivers/net/ethernet/mellanox/mlxsw/core.c
3034
mlxsw_core->lag.mapping[index] = 0;
drivers/net/ethernet/mellanox/mlxsw/core.c
85
u16 *mapping; /* lag_id+port_index to local_port mapping */
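
mlxsw's lag.mapping is the flat table the comment on the line above describes: a kzalloc'd u16 array indexed by (lag_id, port_index), with 0 standing for "no local port". A sketch of that indexing scheme (names invented):

struct lag_map {
        u16 *mapping;                   /* max_lag * ports_per_lag entries, 0 == unset */
        unsigned int ports_per_lag;
};

static unsigned int lag_index(const struct lag_map *m, u16 lag_id, u8 port_index)
{
        return lag_id * m->ports_per_lag + port_index;
}

static void lag_map_set(struct lag_map *m, u16 lag_id, u8 port_index, u16 local_port)
{
        m->mapping[lag_index(m, lag_id, port_index)] = local_port;
}

static u16 lag_map_get(const struct lag_map *m, u16 lag_id, u8 port_index)
{
        return m->mapping[lag_index(m, lag_id, port_index)];
}
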
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1413
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1414
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1519
mlxsw_sp_port->mapping = *port_mapping;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1749
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1750
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2132
mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2133
mlxsw_sp_port->mapping.module,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2134
mlxsw_sp_port->mapping.module_width / count,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2148
port_mapping = mlxsw_sp_port->mapping;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2195
count = mlxsw_sp_port->mapping.module_width /
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2196
mlxsw_sp_port->mapping.width;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2198
mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2199
mlxsw_sp_port->mapping.module,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2200
mlxsw_sp_port->mapping.module_width / count,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
584
mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
585
mlxsw_sp_port->mapping.module);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
596
mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
597
mlxsw_sp_port->mapping.module);
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
609
mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
610
mlxsw_sp_port->mapping.module);
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
336
struct mlxsw_sp_port_mapping mapping; /* mapping is constant during the
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
145
return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1008
ops->to_ptys_speed_lanes(mlxsw_sp, mlxsw_sp_port->mapping.width,
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1041
mlxsw_sp_port->mapping.slot_index,
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1042
mlxsw_sp_port->mapping.module,
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1051
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1052
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1065
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1066
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1079
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1080
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1224
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1225
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1238
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1239
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1252
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
1253
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
572
u8 slot_index = mlxsw_sp_port->mapping.slot_index;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
573
u8 module = mlxsw_sp_port->mapping.module;
drivers/net/ethernet/mscc/ocelot_fdma.c
125
dma_addr_t mapping;
drivers/net/ethernet/mscc/ocelot_fdma.c
132
mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mscc/ocelot_fdma.c
134
if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
drivers/net/ethernet/mscc/ocelot_fdma.c
141
rxb->dma_addr = mapping;
drivers/net/ethernet/mscc/ocelot_fdma.c
191
dma_addr_t mapping;
drivers/net/ethernet/mscc/ocelot_fdma.c
193
mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
drivers/net/ethernet/mscc/ocelot_fdma.c
195
if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
drivers/net/ethernet/mscc/ocelot_fdma.c
198
dma_unmap_addr_set(tx_buf, dma_addr, mapping);
drivers/net/ethernet/mscc/ocelot_fdma.c
200
ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
drivers/net/ethernet/ni/nixge.c
162
dma_addr_t mapping;
drivers/net/ethernet/ni/nixge.c
443
if (tx_skb->mapping) {
drivers/net/ethernet/ni/nixge.c
445
dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
drivers/net/ethernet/ni/nixge.c
449
tx_skb->mapping,
drivers/net/ethernet/ni/nixge.c
451
tx_skb->mapping = 0;
drivers/net/ethernet/ni/nixge.c
536
tx_skb->mapping = cur_phys;
drivers/net/ethernet/ni/nixge.c
557
tx_skb->mapping = cur_phys;
drivers/net/ethernet/ni/nixge.c
588
tx_skb->mapping,
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2683
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2702
mapping = dma_map_single(&cdev->pdev->dev, skb->data,
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2704
if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2724
pkt.first_frag = mapping;
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2744
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2747
if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2756
mapping,
drivers/net/ethernet/qlogic/qed/qed_ll2.c
2769
dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
drivers/net/ethernet/qlogic/qede/qede.h
295
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede.h
388
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1474
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1504
mapping = dma_map_single(&edev->pdev->dev, skb->data,
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1506
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1510
BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
drivers/net/ethernet/qlogic/qede/qede_filter.c
100
params.addr = n->mapping;
drivers/net/ethernet/qlogic/qede/qede_filter.c
142
fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
drivers/net/ethernet/qlogic/qede/qede_filter.c
144
if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
drivers/net/ethernet/qlogic/qede/qede_filter.c
170
dma_unmap_single(&edev->pdev->dev, fltr->mapping,
drivers/net/ethernet/qlogic/qede/qede_filter.c
52
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1120
if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1123
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1130
bd->mapping + *data_offset,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1147
dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1206
dma_unmap_page(rxq->dev, bd->mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1490
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
1531
mapping = dma_map_single(txq->dev, skb->data,
drivers/net/ethernet/qlogic/qede/qede_fp.c
1533
if (unlikely(dma_mapping_error(txq->dev, mapping))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
1540
BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
drivers/net/ethernet/qlogic/qede/qede_fp.c
1631
mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
drivers/net/ethernet/qlogic/qede/qede_fp.c
1635
BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
247
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
250
mapping = skb_frag_dma_map(txq->dev, frag, 0,
drivers/net/ethernet/qlogic/qede/qede_fp.c
252
if (unlikely(dma_mapping_error(txq->dev, mapping)))
drivers/net/ethernet/qlogic/qede/qede_fp.c
256
BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
drivers/net/ethernet/qlogic/qede/qede_fp.c
32
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
331
xdp->mapping = dma;
drivers/net/ethernet/qlogic/qede/qede_fp.c
347
dma_addr_t mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
365
mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
drivers/net/ethernet/qlogic/qede/qede_fp.c
367
if (unlikely(dma_mapping_error(dmadev, mapping)))
drivers/net/ethernet/qlogic/qede/qede_fp.c
370
if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
drivers/net/ethernet/qlogic/qede/qede_fp.c
416
dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
drivers/net/ethernet/qlogic/qede/qede_fp.c
422
dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
drivers/net/ethernet/qlogic/qede/qede_fp.c
51
mapping = dma_map_page(rxq->dev, data, 0,
drivers/net/ethernet/qlogic/qede/qede_fp.c
53
if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
drivers/net/ethernet/qlogic/qede/qede_fp.c
535
new_mapping = curr_prod->mapping + curr_prod->page_offset;
drivers/net/ethernet/qlogic/qede/qede_fp.c
575
dma_unmap_page(rxq->dev, curr_cons->mapping,
drivers/net/ethernet/qlogic/qede/qede_fp.c
61
sw_rx_data->mapping = mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
66
rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
drivers/net/ethernet/qlogic/qede/qede_fp.c
67
rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
drivers/net/ethernet/qlogic/qede/qede_fp.c
847
tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
drivers/net/ethernet/qlogic/qede/qede_fp.c
986
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
drivers/net/ethernet/qlogic/qede/qede_main.c
1518
rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
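
The qede hits reconstruct a 64-bit address from the two halves stored in a buffer descriptor: HILO_U64(le32_to_cpu(first_bd->addr.hi), ...). The equivalent arithmetic, spelled out:

/* rebuild the 64-bit handle from the halves a descriptor stores */
static u64 hilo_to_u64(__le32 hi, __le32 lo)
{
        return ((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);
}
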
drivers/net/ethernet/rdc/r6040.c
298
dma_addr_t mapping = desc_dma;
drivers/net/ethernet/rdc/r6040.c
301
mapping += sizeof(*desc);
drivers/net/ethernet/rdc/r6040.c
302
desc->ndesc = cpu_to_le32(mapping);
drivers/net/ethernet/realtek/8139cp.c
1065
dma_addr_t mapping;
drivers/net/ethernet/realtek/8139cp.c
1071
mapping = dma_map_single(&cp->pdev->dev, skb->data,
drivers/net/ethernet/realtek/8139cp.c
1073
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
drivers/net/ethernet/realtek/8139cp.c
1080
cp->rx_ring[i].addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/8139cp.c
474
dma_addr_t mapping, new_mapping;
drivers/net/ethernet/realtek/8139cp.c
488
mapping = le64_to_cpu(desc->addr);
drivers/net/ethernet/realtek/8139cp.c
524
dma_unmap_single(&cp->pdev->dev, mapping,
drivers/net/ethernet/realtek/8139cp.c
539
mapping = new_mapping;
drivers/net/ethernet/realtek/8139cp.c
543
cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/8139cp.c
781
dma_addr_t mapping;
drivers/net/ethernet/realtek/8139cp.c
784
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/realtek/8139cp.c
785
if (dma_mapping_error(&cp->pdev->dev, mapping))
drivers/net/ethernet/realtek/8139cp.c
789
txd->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/8139cp.c
822
dma_addr_t mapping;
drivers/net/ethernet/realtek/8139cp.c
827
mapping = dma_map_single(&cp->pdev->dev,
drivers/net/ethernet/realtek/8139cp.c
830
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
drivers/net/ethernet/realtek/8139cp.c
844
txd->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/r8169_main.c
4159
dma_addr_t mapping;
drivers/net/ethernet/realtek/r8169_main.c
4166
mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
drivers/net/ethernet/realtek/r8169_main.c
4167
if (unlikely(dma_mapping_error(d, mapping))) {
drivers/net/ethernet/realtek/r8169_main.c
4173
desc->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/r8169_main.c
4326
dma_addr_t mapping;
drivers/net/ethernet/realtek/r8169_main.c
4330
mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
drivers/net/ethernet/realtek/r8169_main.c
4331
ret = dma_mapping_error(d, mapping);
drivers/net/ethernet/realtek/r8169_main.c
4338
txd->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/rtase/rtase_main.c
1262
dma_addr_t mapping;
drivers/net/ethernet/realtek/rtase/rtase_main.c
1271
mapping = dma_map_single(&tp->pdev->dev, addr, len,
drivers/net/ethernet/realtek/rtase/rtase_main.c
1274
if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
drivers/net/ethernet/realtek/rtase/rtase_main.c
1293
txd->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/rtase/rtase_main.c
1319
dma_addr_t mapping;
drivers/net/ethernet/realtek/rtase/rtase_main.c
1381
mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
drivers/net/ethernet/realtek/rtase/rtase_main.c
1384
if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
drivers/net/ethernet/realtek/rtase/rtase_main.c
1392
txd->addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/rtase/rtase_main.c
354
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
drivers/net/ethernet/realtek/rtase/rtase_main.c
357
desc->desc_cmd.addr = cpu_to_le64(mapping);
drivers/net/ethernet/realtek/rtase/rtase_main.c
375
dma_addr_t mapping;
drivers/net/ethernet/realtek/rtase/rtase_main.c
385
mapping = page_pool_get_dma_addr(page);
drivers/net/ethernet/realtek/rtase/rtase_main.c
386
*rx_phy_addr = mapping;
drivers/net/ethernet/realtek/rtase/rtase_main.c
387
rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
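
rtase reads the handle back with page_pool_get_dma_addr() instead of calling dma_map_page() itself: when a page_pool is created with PP_FLAG_DMA_MAP, the pool owns the mapping for the page's lifetime. A sketch of that consumption side, under the assumption the pool was created with that flag:

#include <net/page_pool/helpers.h>

/* the pool owns the DMA mapping; the driver only reads it back */
static dma_addr_t rx_page_get(struct page_pool *pool, struct page **pagep)
{
        struct page *page = page_pool_dev_alloc_pages(pool);

        if (!page)
                return 0;                       /* 0 as sentinel, sketch only */
        *pagep = page;
        return page_pool_get_dma_addr(page);    /* no dma_map_page() here */
}
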
drivers/net/ethernet/sis/sis190.c
1187
dma_addr_t mapping;
drivers/net/ethernet/sis/sis190.c
1209
mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
drivers/net/ethernet/sis/sis190.c
1211
if (dma_mapping_error(&tp->pci_dev->dev, mapping)) {
drivers/net/ethernet/sis/sis190.c
1220
desc->addr = cpu_to_le32(mapping);
drivers/net/ethernet/sis/sis190.c
471
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
drivers/net/ethernet/sis/sis190.c
474
desc->addr = cpu_to_le32(mapping);
drivers/net/ethernet/sis/sis190.c
492
dma_addr_t mapping;
drivers/net/ethernet/sis/sis190.c
497
mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
drivers/net/ethernet/sis/sis190.c
499
if (dma_mapping_error(&tp->pci_dev->dev, mapping))
drivers/net/ethernet/sis/sis190.c
501
sis190_map_to_asic(desc, mapping, rx_buf_sz);
drivers/net/ethernet/smsc/smsc9420.c
1189
pd->tx_buffers[i].mapping = 0;
drivers/net/ethernet/smsc/smsc9420.c
1223
pd->rx_buffers[i].mapping = 0;
drivers/net/ethernet/smsc/smsc9420.c
42
dma_addr_t mapping;
drivers/net/ethernet/smsc/smsc9420.c
502
BUG_ON(!pd->tx_buffers[i].mapping);
drivers/net/ethernet/smsc/smsc9420.c
504
pd->tx_buffers[i].mapping, skb->len,
drivers/net/ethernet/smsc/smsc9420.c
536
if (pd->rx_buffers[i].mapping)
drivers/net/ethernet/smsc/smsc9420.c
538
pd->rx_buffers[i].mapping,
drivers/net/ethernet/smsc/smsc9420.c
757
dma_unmap_single(&pd->pdev->dev, pd->rx_buffers[index].mapping,
drivers/net/ethernet/smsc/smsc9420.c
759
pd->rx_buffers[index].mapping = 0;
drivers/net/ethernet/smsc/smsc9420.c
782
dma_addr_t mapping;
drivers/net/ethernet/smsc/smsc9420.c
785
BUG_ON(pd->rx_buffers[index].mapping);
drivers/net/ethernet/smsc/smsc9420.c
790
mapping = dma_map_single(&pd->pdev->dev, skb_tail_pointer(skb),
drivers/net/ethernet/smsc/smsc9420.c
792
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
drivers/net/ethernet/smsc/smsc9420.c
799
pd->rx_buffers[index].mapping = mapping;
drivers/net/ethernet/smsc/smsc9420.c
800
pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN;
drivers/net/ethernet/smsc/smsc9420.c
907
BUG_ON(!pd->tx_buffers[index].mapping);
drivers/net/ethernet/smsc/smsc9420.c
910
pd->tx_buffers[index].mapping,
drivers/net/ethernet/smsc/smsc9420.c
913
pd->tx_buffers[index].mapping = 0;
drivers/net/ethernet/smsc/smsc9420.c
929
dma_addr_t mapping;
drivers/net/ethernet/smsc/smsc9420.c
940
BUG_ON(pd->tx_buffers[index].mapping);
drivers/net/ethernet/smsc/smsc9420.c
942
mapping = dma_map_single(&pd->pdev->dev, skb->data, skb->len,
drivers/net/ethernet/smsc/smsc9420.c
944
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
drivers/net/ethernet/smsc/smsc9420.c
951
pd->tx_buffers[index].mapping = mapping;
drivers/net/ethernet/smsc/smsc9420.c
963
pd->tx_ring[index].buffer1 = mapping;
drivers/net/ethernet/sun/cassini.c
2685
dma_addr_t mapping, int len, u64 ctrl, int last)
drivers/net/ethernet/sun/cassini.c
2695
txd->buffer = cpu_to_le64(mapping);
drivers/net/ethernet/sun/cassini.c
2717
dma_addr_t mapping;
drivers/net/ethernet/sun/cassini.c
2748
mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
drivers/net/ethernet/sun/cassini.c
2755
cas_write_txd(cp, ring, entry, mapping, len - tabort,
drivers/net/ethernet/sun/cassini.c
2761
mapping = tx_tiny_map(cp, ring, entry, tentry);
drivers/net/ethernet/sun/cassini.c
2762
cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
drivers/net/ethernet/sun/cassini.c
2765
cas_write_txd(cp, ring, entry, mapping, len, ctrl |
drivers/net/ethernet/sun/cassini.c
2774
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
drivers/net/ethernet/sun/cassini.c
2780
cas_write_txd(cp, ring, entry, mapping, len - tabort,
drivers/net/ethernet/sun/cassini.c
2787
mapping = tx_tiny_map(cp, ring, entry, tentry);
drivers/net/ethernet/sun/cassini.c
2791
cas_write_txd(cp, ring, entry, mapping, len, ctrl,
drivers/net/ethernet/sun/niu.c
10238
BUILD_BUG_ON(offsetof(struct page, mapping) !=
drivers/net/ethernet/sun/niu.c
3575
np->ops->unmap_single(np->device, tb->mapping,
drivers/net/ethernet/sun/niu.c
3590
np->ops->unmap_page(np->device, tb->mapping,
drivers/net/ethernet/sun/niu.c
6548
u64 mapping, u64 len, u64 mark,
drivers/net/ethernet/sun/niu.c
6556
(mapping & TX_DESC_SAD));
drivers/net/ethernet/sun/niu.c
6635
u64 mapping, mrk;
drivers/net/ethernet/sun/niu.c
6674
mapping = np->ops->map_single(np->device, skb->data,
drivers/net/ethernet/sun/niu.c
6676
if (np->ops->mapping_error(np->device, mapping))
drivers/net/ethernet/sun/niu.c
6682
rp->tx_buffs[prod].mapping = mapping;
drivers/net/ethernet/sun/niu.c
6704
niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
drivers/net/ethernet/sun/niu.c
6708
mapping += this_len;
drivers/net/ethernet/sun/niu.c
6716
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
drivers/net/ethernet/sun/niu.c
6719
if (np->ops->mapping_error(np->device, mapping))
drivers/net/ethernet/sun/niu.c
6723
rp->tx_buffs[prod].mapping = mapping;
drivers/net/ethernet/sun/niu.c
6725
niu_set_txd(rp, prod, mapping, len, 0, 0);
drivers/net/ethernet/sun/niu.c
6751
np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
drivers/net/ethernet/sun/niu.c
6755
np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
drivers/net/ethernet/sun/niu.h
2836
u64 mapping;
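
niu never calls dma_map_single() directly; everything goes through np->ops (map_single, map_page, mapping_error), so the same ring code can run over either PCI DMA or the Niagara hypervisor's mapping service. A sketch of that indirection with one invented backend:

#include <linux/dma-mapping.h>

struct mapping_ops {                            /* invented, in the spirit of np->ops */
        u64 (*map_single)(struct device *dev, void *cpu, size_t sz,
                          enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, u64 handle);
};

static u64 pci_backend_map(struct device *dev, void *cpu, size_t sz,
                           enum dma_data_direction dir)
{
        return dma_map_single(dev, cpu, sz, dir);
}

static int pci_backend_err(struct device *dev, u64 handle)
{
        return dma_mapping_error(dev, handle);
}

static const struct mapping_ops pci_backend = {
        .map_single    = pci_backend_map,
        .mapping_error = pci_backend_err,
};

/* ring code calls through the ops, never dma_* directly */
static int ring_map_tx(const struct mapping_ops *ops, struct device *dev,
                       void *buf, size_t len, u64 *handle)
{
        *handle = ops->map_single(dev, buf, len, DMA_TO_DEVICE);
        return ops->mapping_error(dev, *handle) ? -ENOMEM : 0;
}
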
drivers/net/ethernet/sun/sunbmac.c
958
u32 mapping;
drivers/net/ethernet/sun/sunbmac.c
961
mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
drivers/net/ethernet/sun/sunbmac.c
970
bp->bmac_block->be_txd[entry].tx_addr = mapping;
drivers/net/ethernet/sun/sungem.c
1010
dma_addr_t mapping;
drivers/net/ethernet/sun/sungem.c
1014
mapping = dma_map_page(&gp->pdev->dev,
drivers/net/ethernet/sun/sungem.c
1021
txd->buffer = cpu_to_le64(mapping);
drivers/net/ethernet/sun/sungem.c
1049
dma_addr_t mapping;
drivers/net/ethernet/sun/sungem.c
1053
mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
drivers/net/ethernet/sun/sungem.c
1060
txd->buffer = cpu_to_le64(mapping);
drivers/net/ethernet/sun/sunhme.c
1224
u32 mapping;
drivers/net/ethernet/sun/sunhme.c
1235
mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
drivers/net/ethernet/sun/sunhme.c
1237
if (dma_mapping_error(hp->dma_dev, mapping)) {
drivers/net/ethernet/sun/sunhme.c
1244
mapping);
drivers/net/ethernet/sun/sunhme.c
1796
u32 mapping;
drivers/net/ethernet/sun/sunhme.c
1805
mapping = dma_map_single(hp->dma_dev, new_skb->data,
drivers/net/ethernet/sun/sunhme.c
1808
if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
drivers/net/ethernet/sun/sunhme.c
1818
mapping);
drivers/net/ethernet/sun/sunhme.c
2002
u32 mapping, len;
drivers/net/ethernet/sun/sunhme.c
2005
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
drivers/net/ethernet/sun/sunhme.c
2006
if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
drivers/net/ethernet/sun/sunhme.c
2011
mapping);
drivers/net/ethernet/sun/sunhme.c
2029
u32 len, mapping, this_txflags;
drivers/net/ethernet/sun/sunhme.c
2032
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
drivers/net/ethernet/sun/sunhme.c
2034
if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
drivers/net/ethernet/sun/sunhme.c
2044
mapping);
drivers/net/ethernet/sunplus/spl2sw_define.h
218
u32 mapping;
drivers/net/ethernet/sunplus/spl2sw_desc.c
128
u32 mapping;
drivers/net/ethernet/sunplus/spl2sw_desc.c
146
mapping = dma_map_single(&comm->pdev->dev, skb->data,
drivers/net/ethernet/sunplus/spl2sw_desc.c
149
if (dma_mapping_error(&comm->pdev->dev, mapping))
drivers/net/ethernet/sunplus/spl2sw_desc.c
152
rx_skbinfo[j].mapping = mapping;
drivers/net/ethernet/sunplus/spl2sw_desc.c
153
rx_desc[j].addr1 = mapping;
drivers/net/ethernet/sunplus/spl2sw_desc.c
23
rx_desc[j].addr1 = rx_skbinfo[j].mapping;
drivers/net/ethernet/sunplus/spl2sw_desc.c
47
if (comm->tx_temp_skb_info[i].mapping) {
drivers/net/ethernet/sunplus/spl2sw_desc.c
48
dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
drivers/net/ethernet/sunplus/spl2sw_desc.c
50
comm->tx_temp_skb_info[i].mapping = 0;
drivers/net/ethernet/sunplus/spl2sw_desc.c
79
dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
drivers/net/ethernet/sunplus/spl2sw_desc.c
83
rx_skbinfo[j].mapping = 0;
drivers/net/ethernet/sunplus/spl2sw_driver.c
105
skbinfo->mapping = mapping;
drivers/net/ethernet/sunplus/spl2sw_driver.c
117
txdesc->addr1 = skbinfo->mapping;
drivers/net/ethernet/sunplus/spl2sw_driver.c
73
u32 mapping;
drivers/net/ethernet/sunplus/spl2sw_driver.c
92
mapping = dma_map_single(&comm->pdev->dev, skb->data,
drivers/net/ethernet/sunplus/spl2sw_driver.c
94
if (dma_mapping_error(&comm->pdev->dev, mapping)) {
drivers/net/ethernet/sunplus/spl2sw_int.c
176
dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
drivers/net/ethernet/sunplus/spl2sw_int.c
178
skbinfo->mapping = 0;
drivers/net/ethernet/sunplus/spl2sw_int.c
60
dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
drivers/net/ethernet/sunplus/spl2sw_int.c
79
sinfo->mapping = 0;
drivers/net/ethernet/sunplus/spl2sw_int.c
84
sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
drivers/net/ethernet/sunplus/spl2sw_int.c
87
if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
drivers/net/ethernet/sunplus/spl2sw_int.c
92
sinfo->mapping = 0;
drivers/net/ethernet/sunplus/spl2sw_int.c
98
desc->addr1 = sinfo->mapping;
drivers/net/wireless/admtek/adm8211.c
1450
rx_info->mapping = dma_map_single(&priv->pdev->dev,
drivers/net/wireless/admtek/adm8211.c
1454
if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
drivers/net/wireless/admtek/adm8211.c
1460
desc->buffer1 = cpu_to_le32(rx_info->mapping);
drivers/net/wireless/admtek/adm8211.c
1470
tx_info->mapping = 0;
drivers/net/wireless/admtek/adm8211.c
1492
priv->rx_buffers[i].mapping, RX_PKT_SIZE,
drivers/net/wireless/admtek/adm8211.c
1503
priv->tx_buffers[i].mapping,
drivers/net/wireless/admtek/adm8211.c
1627
dma_addr_t mapping;
drivers/net/wireless/admtek/adm8211.c
1631
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
drivers/net/wireless/admtek/adm8211.c
1633
if (dma_mapping_error(&priv->pdev->dev, mapping))
drivers/net/wireless/admtek/adm8211.c
1649
priv->tx_buffers[entry].mapping = mapping;
drivers/net/wireless/admtek/adm8211.c
1651
priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
drivers/net/wireless/admtek/adm8211.c
326
dma_unmap_single(&priv->pdev->dev, info->mapping,
drivers/net/wireless/admtek/adm8211.c
385
priv->rx_buffers[entry].mapping,
drivers/net/wireless/admtek/adm8211.c
392
priv->rx_buffers[entry].mapping,
drivers/net/wireless/admtek/adm8211.c
402
priv->rx_buffers[entry].mapping,
drivers/net/wireless/admtek/adm8211.c
405
priv->rx_buffers[entry].mapping =
drivers/net/wireless/admtek/adm8211.c
411
priv->rx_buffers[entry].mapping)) {
drivers/net/wireless/admtek/adm8211.c
423
cpu_to_le32(priv->rx_buffers[entry].mapping);
drivers/net/wireless/admtek/adm8211.h
441
dma_addr_t mapping;
drivers/net/wireless/admtek/adm8211.h
446
dma_addr_t mapping;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
592
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
drivers/net/wireless/intel/iwlegacy/3945.c
656
dma_unmap_addr(&txq->meta[idx], mapping),
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1840
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3935
dma_unmap_addr(&txq->meta[idx], mapping),
drivers/net/wireless/intel/iwlegacy/common.c
2825
dma_unmap_addr(&txq->meta[i], mapping),
drivers/net/wireless/intel/iwlegacy/common.c
2837
dma_unmap_addr(&txq->meta[i], mapping),
drivers/net/wireless/intel/iwlegacy/common.c
3217
dma_unmap_addr_set(out_meta, mapping, phys_addr);
drivers/net/wireless/intel/iwlegacy/common.c
3311
dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
drivers/net/wireless/intel/iwlegacy/common.h
109
DEFINE_DMA_UNMAP_ADDR(mapping);
drivers/net/wireless/intersil/p54/fwio.c
589
memset(edcf->mapping, 0, sizeof(edcf->mapping));
drivers/net/wireless/intersil/p54/lmac.h
405
u8 mapping[4];
drivers/net/wireless/intersil/p54/p54pci.c
151
dma_addr_t mapping;
drivers/net/wireless/intersil/p54/p54pci.c
156
mapping = dma_map_single(&priv->pdev->dev,
drivers/net/wireless/intersil/p54/p54pci.c
161
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
drivers/net/wireless/intersil/p54/p54pci.c
168
desc->host_addr = cpu_to_le32(mapping);
drivers/net/wireless/intersil/p54/p54pci.c
334
dma_addr_t mapping;
drivers/net/wireless/intersil/p54/p54pci.c
343
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
drivers/net/wireless/intersil/p54/p54pci.c
345
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
drivers/net/wireless/intersil/p54/p54pci.c
354
desc->host_addr = cpu_to_le32(mapping);
drivers/net/wireless/marvell/mwifiex/pcie.c
200
struct mwifiex_dma_mapping mapping;
drivers/net/wireless/marvell/mwifiex/pcie.c
202
mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
drivers/net/wireless/marvell/mwifiex/pcie.c
203
if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
drivers/net/wireless/marvell/mwifiex/pcie.c
207
mapping.len = size;
drivers/net/wireless/marvell/mwifiex/pcie.c
208
mwifiex_store_mapping(skb, &mapping);
drivers/net/wireless/marvell/mwifiex/pcie.c
216
struct mwifiex_dma_mapping mapping;
drivers/net/wireless/marvell/mwifiex/pcie.c
218
mwifiex_get_mapping(skb, &mapping);
drivers/net/wireless/marvell/mwifiex/pcie.c
219
dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
drivers/net/wireless/marvell/mwifiex/util.h
57
struct mwifiex_dma_mapping *mapping)
drivers/net/wireless/marvell/mwifiex/util.h
61
memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
drivers/net/wireless/marvell/mwifiex/util.h
65
struct mwifiex_dma_mapping *mapping)
drivers/net/wireless/marvell/mwifiex/util.h
69
memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
drivers/net/wireless/marvell/mwifiex/util.h
74
struct mwifiex_dma_mapping mapping;
drivers/net/wireless/marvell/mwifiex/util.h
76
mwifiex_get_mapping(skb, &mapping);
drivers/net/wireless/marvell/mwifiex/util.h
78
return mapping.addr;
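
mwifiex stashes an {addr, len} pair in skb->cb so the unmap path needs only the skb, and the rtl8180 hits just below do the cruder version, casting skb->cb straight to dma_addr_t. A sketch of the safer memcpy form (struct name invented); skb->cb is 48 bytes of scratch owned by whichever layer currently holds the skb:

#include <linux/skbuff.h>
#include <linux/string.h>

struct dma_stash {                              /* invented name */
        dma_addr_t addr;
        size_t len;
};

static void skb_stash_mapping(struct sk_buff *skb, dma_addr_t addr, size_t len)
{
        struct dma_stash stash = { .addr = addr, .len = len };

        BUILD_BUG_ON(sizeof(stash) > sizeof(skb->cb));
        memcpy(skb->cb, &stash, sizeof(stash));
}

static void skb_unstash_mapping(struct sk_buff *skb, struct dma_stash *stash)
{
        memcpy(stash, skb->cb, sizeof(*stash));
}
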
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1023
dma_addr_t *mapping;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1030
mapping = (dma_addr_t *)skb->cb;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1031
*mapping = dma_map_single(&priv->pdev->dev,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1035
if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
1042
entry->rx_buf = cpu_to_le32(*mapping);
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
218
dma_addr_t mapping;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
263
mapping = dma_map_single(&priv->pdev->dev,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
267
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
327
*((dma_addr_t *) skb->cb) = mapping;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
467
dma_addr_t mapping;
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
480
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
483
if (dma_mapping_error(&priv->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
562
entry->tx_buf = cpu_to_le32(mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
497
dma_addr_t mapping;
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
518
mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
520
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
645
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
679
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
682
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
704
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
363
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
368
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
492
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
528
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
531
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
552
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
212
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
244
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
247
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
269
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
66
dma_addr_t mapping;
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
84
mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
86
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
661
dma_addr_t mapping;
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
680
mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
682
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
690
skb, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
815
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
838
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
843
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
865
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
331
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
335
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
489
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
503
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
506
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
526
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
545
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
364
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
368
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
491
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
529
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
532
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
554
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
423
dma_addr_t mapping;
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
443
mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
445
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
565
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
595
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
598
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
620
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
683
dma_addr_t mapping;
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
696
mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
698
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
808
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
839
dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
842
if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
868
set_tx_desc_tx_buffer_address(pdesc, mapping);
drivers/net/wireless/ti/wlcore/main.c
1283
int q, mapping;
drivers/net/wireless/ti/wlcore/main.c
1293
mapping = skb_get_queue_mapping(skb);
drivers/net/wireless/ti/wlcore/main.c
1294
q = wl1271_tx_get_queue(mapping);
drivers/net/wwan/iosm/iosm_ipc_imem.c
311
IPC_CB(skb)->mapping,
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
123
dma_addr_t mapping;
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
126
ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
133
IPC_CB(skb)->mapping = mapping;
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
480
dma_addr_t mapping = 0;
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
483
ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
491
ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
566
ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
577
dma_addr_t mapping;
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
598
skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
drivers/net/wwan/iosm/iosm_ipc_mux.c
346
dma_addr_t mapping;
drivers/net/wwan/iosm/iosm_ipc_mux.c
349
&mapping, DMA_TO_DEVICE, 0);
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
129
dma_addr_t mapping;
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
133
GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
drivers/net/wwan/iosm/iosm_ipc_pcie.c
455
size_t size, dma_addr_t *mapping, int direction)
drivers/net/wwan/iosm/iosm_ipc_pcie.c
458
*mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
drivers/net/wwan/iosm/iosm_ipc_pcie.c
460
if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
drivers/net/wwan/iosm/iosm_ipc_pcie.c
469
dma_addr_t mapping, int direction)
drivers/net/wwan/iosm/iosm_ipc_pcie.c
471
if (!mapping)
drivers/net/wwan/iosm/iosm_ipc_pcie.c
474
dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
drivers/net/wwan/iosm/iosm_ipc_pcie.c
492
IPC_CB(skb)->mapping = 0;
drivers/net/wwan/iosm/iosm_ipc_pcie.c
498
gfp_t flags, dma_addr_t *mapping,
drivers/net/wwan/iosm/iosm_ipc_pcie.c
509
if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
drivers/net/wwan/iosm/iosm_ipc_pcie.c
517
IPC_CB(skb)->mapping = *mapping;
drivers/net/wwan/iosm/iosm_ipc_pcie.c
529
ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
drivers/net/wwan/iosm/iosm_ipc_pcie.c
531
IPC_CB(skb)->mapping = 0;
drivers/net/wwan/iosm/iosm_ipc_pcie.h
122
size_t size, dma_addr_t *mapping, int direction);
drivers/net/wwan/iosm/iosm_ipc_pcie.h
132
dma_addr_t mapping, int direction);
drivers/net/wwan/iosm/iosm_ipc_pcie.h
146
gfp_t flags, dma_addr_t *mapping,
drivers/net/wwan/iosm/iosm_ipc_pcie.h
89
dma_addr_t mapping;
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
267
td->buffer.address = IPC_CB(skb)->mapping;
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
310
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
327
dma_addr_t mapping = 0;
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
350
&mapping, DMA_FROM_DEVICE,
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
355
td->buffer.address = mapping;
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
404
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
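
Throughout the iosm hits, the dma_addr_t returned by ipc_pcie_addr_map() is stashed in the skb control block (IPC_CB(skb)->mapping) and zeroed again after unmap, and the unmap path bails out early on a zero handle (the !mapping guard at iosm_ipc_pcie.c:471). A sketch of that pairing; struct ex_cb and EX_CB() are illustrative stand-ins for the driver's ipc_skb_cb/IPC_CB():

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct ex_cb {
        dma_addr_t mapping;
        size_t len;
};
#define EX_CB(skb) ((struct ex_cb *)&((skb)->cb[0]))

static int ex_map(struct device *dev, struct sk_buff *skb)
{
        dma_addr_t mapping = dma_map_single(dev, skb->data, skb->len,
                                            DMA_TO_DEVICE);

        if (dma_mapping_error(dev, mapping))
                return -ENOMEM;
        EX_CB(skb)->mapping = mapping;
        EX_CB(skb)->len = skb->len;
        return 0;
}

static void ex_unmap(struct device *dev, struct sk_buff *skb)
{
        if (!EX_CB(skb)->mapping)
                return;         /* driver convention: 0 means "not mapped" */
        dma_unmap_single(dev, EX_CB(skb)->mapping, EX_CB(skb)->len,
                         DMA_TO_DEVICE);
        EX_CB(skb)->mapping = 0;
}
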
drivers/net/xen-netback/common.h
258
u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
drivers/net/xen-netback/hash.c
328
memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
drivers/net/xen-netback/hash.c
337
u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
drivers/net/xen-netback/hash.c
343
.len = len * sizeof(*mapping),
drivers/net/xen-netback/hash.c
348
len > XEN_PAGE_SIZE / sizeof(*mapping))
drivers/net/xen-netback/hash.c
351
copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
drivers/net/xen-netback/hash.c
352
copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
drivers/net/xen-netback/hash.c
356
copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
drivers/net/xen-netback/hash.c
363
memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
drivers/net/xen-netback/hash.c
364
vif->hash.size * sizeof(*mapping));
drivers/net/xen-netback/hash.c
375
if (mapping[off++] >= vif->num_queues)
drivers/net/xen-netback/hash.c
431
const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
drivers/net/xen-netback/hash.c
445
seq_printf(m, "%4u ", mapping[i]);
drivers/net/xen-netback/interface.c
200
return vif->hash.mapping[vif->hash.mapping_sel]
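
xen-netback keeps two copies of the queue-mapping table plus a selector: updates go into the inactive copy (hash.c:337), the previous contents are carried over (line 363), entries are validated against num_queues (line 375), and readers index through mapping_sel (interface.c:200). A reduced sketch of that validate-then-publish idea; the eventual flip of the selector is assumed, since the flip itself is not among the hits above:

#include <linux/string.h>
#include <linux/types.h>

static u32 ex_table[2][64];     /* illustrative size */
static unsigned int ex_sel;     /* index of the live copy */

static int ex_update(const u32 *new_map, unsigned int len,
                     unsigned int num_queues)
{
        u32 *shadow = ex_table[!ex_sel];
        unsigned int i;

        memcpy(shadow, new_map, len * sizeof(*shadow));
        for (i = 0; i < len; i++)
                if (shadow[i] >= num_queues)
                        return -EINVAL; /* reject before publishing */
        ex_sel = !ex_sel;               /* readers switch in one step */
        return 0;
}
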
drivers/nvdimm/btt.c
105
return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
drivers/nvdimm/btt.c
108
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
drivers/nvdimm/btt.c
118
mapping = ent_lba(mapping);
drivers/nvdimm/btt.c
129
mapping |= MAP_ENT_NORMAL;
drivers/nvdimm/btt.c
132
mapping |= (1 << MAP_ERR_SHIFT);
drivers/nvdimm/btt.c
135
mapping |= (1 << MAP_TRIM_SHIFT);
drivers/nvdimm/btt.c
148
mapping_le = cpu_to_le32(mapping);
drivers/nvdimm/btt.c
152
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
drivers/nvdimm/btt.c
183
*mapping = lba;
drivers/nvdimm/btt.c
186
*mapping = postmap;
drivers/nvdimm/btt.c
190
*mapping = postmap;
drivers/nvdimm/btt.c
194
*mapping = postmap;
drivers/nvdimm/btt.c
96
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
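
btt.c packs each map entry into 32 bits: the post-map LBA in the low bits (ent_lba() at line 118 masks the flags off), with MAP_ENT_NORMAL or the error/trim bits in the top two positions, serialized little-endian for media at line 148. A sketch of the encoding; the concrete values are my reading of btt.h and should be treated as illustrative, and the real state machine around the two top bits is richer than this:

#include <linux/types.h>

#define EX_MAP_ERR_SHIFT        30              /* assumed: MAP_ERR_SHIFT */
#define EX_MAP_TRIM_SHIFT       31              /* assumed: MAP_TRIM_SHIFT */
#define EX_MAP_ENT_NORMAL       0xC0000000      /* assumed: both bits set */
#define EX_MAP_LBA_MASK         0x3FFFFFFF

static __le32 ex_encode_ent(u32 postmap, bool err, bool trim)
{
        u32 mapping = postmap & EX_MAP_LBA_MASK;        /* ent_lba() analogue */

        if (err)
                mapping |= 1U << EX_MAP_ERR_SHIFT;
        else if (trim)
                mapping |= 1U << EX_MAP_TRIM_SHIFT;
        else
                mapping |= EX_MAP_ENT_NORMAL;           /* plain mapped entry */
        return cpu_to_le32(mapping);
}
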
drivers/nvdimm/label.c
1063
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/label.c
1095
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1159
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1178
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1563
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1610
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1718
struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
drivers/nvdimm/namespace_devs.c
1746
nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
1887
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
drivers/nvdimm/namespace_devs.c
1930
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
drivers/nvdimm/namespace_devs.c
2002
nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
2043
nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
2052
nd_mapping = &nd_region->mapping[reverse];
drivers/nvdimm/namespace_devs.c
2065
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
2085
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
349
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
624
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
676
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
708
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
drivers/nvdimm/namespace_devs.c
789
nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
902
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
974
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/namespace_devs.c
990
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/nd.h
425
struct nd_mapping mapping[] __counted_by(ndr_mappings);
drivers/nvdimm/ramdax.c
38
struct nd_mapping_desc mapping;
drivers/nvdimm/ramdax.c
51
memset(&mapping, 0, sizeof(mapping));
drivers/nvdimm/ramdax.c
52
mapping.nvdimm = nvdimm;
drivers/nvdimm/ramdax.c
53
mapping.start = 0;
drivers/nvdimm/ramdax.c
54
mapping.size = resource_size(res) - LABEL_AREA_SIZE;
drivers/nvdimm/ramdax.c
61
ndr_desc.mapping = &mapping;
drivers/nvdimm/region_devs.c
1008
kzalloc_flex(*nd_region, mapping, ndr_desc->num_mappings);
drivers/nvdimm/region_devs.c
1035
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
drivers/nvdimm/region_devs.c
1036
struct nvdimm *nvdimm = mapping->nvdimm;
drivers/nvdimm/region_devs.c
1038
nd_region->mapping[i].nvdimm = nvdimm;
drivers/nvdimm/region_devs.c
1039
nd_region->mapping[i].start = mapping->start;
drivers/nvdimm/region_devs.c
1040
nd_region->mapping[i].size = mapping->size;
drivers/nvdimm/region_devs.c
1041
nd_region->mapping[i].position = mapping->position;
drivers/nvdimm/region_devs.c
1042
INIT_LIST_HEAD(&nd_region->mapping[i].labels);
drivers/nvdimm/region_devs.c
1043
mutex_init(&nd_region->mapping[i].lock);
drivers/nvdimm/region_devs.c
113
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
1180
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
158
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
190
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
238
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
259
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
drivers/nvdimm/region_devs.c
347
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
drivers/nvdimm/region_devs.c
375
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
395
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
68
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
697
nd_mapping = &nd_region->mapping[n];
drivers/nvdimm/region_devs.c
706
static ssize_t mapping##idx##_show(struct device *dev, \
drivers/nvdimm/region_devs.c
711
static DEVICE_ATTR_RO(mapping##idx)
drivers/nvdimm/region_devs.c
96
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
drivers/nvdimm/region_devs.c
992
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
drivers/nvdimm/region_devs.c
993
struct nvdimm *nvdimm = mapping->nvdimm;
drivers/nvdimm/region_devs.c
995
if ((mapping->start | mapping->size) % PAGE_SIZE) {
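
nd.h:425 declares mapping[] as a flexible array annotated __counted_by(ndr_mappings), and region_devs.c:1008 allocates it with kzalloc_flex(); my assumption is that this helper wraps the long-standing struct_size() idiom of sizing header plus trailing elements in one allocation. Sketched with illustrative types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct ex_region {
        unsigned int ndr_mappings;
        struct ex_mapping {
                u64 start;
                u64 size;
        } mapping[] __counted_by(ndr_mappings);
};

static struct ex_region *ex_region_alloc(unsigned int n)
{
        struct ex_region *r = kzalloc(struct_size(r, mapping, n), GFP_KERNEL);

        if (r)
                r->ndr_mappings = n;    /* set the counter before mapping[] use */
        return r;
}

With __counted_by, FORTIFY/UBSAN can bounds-check accesses to mapping[] against ndr_mappings at runtime, which is why the counter is written before the array is touched.
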
drivers/pci/devres.c
461
void __iomem *mapping, int bar)
drivers/pci/devres.c
472
legacy_iomap_table[bar] = mapping;
drivers/pci/devres.c
537
void __iomem *mapping;
drivers/pci/devres.c
548
mapping = pci_iomap(pdev, bar, maxlen);
drivers/pci/devres.c
549
if (!mapping)
drivers/pci/devres.c
551
res->baseaddr = mapping;
drivers/pci/devres.c
553
if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
drivers/pci/devres.c
557
return mapping;
drivers/pci/devres.c
560
pci_iounmap(pdev, mapping);
drivers/pci/devres.c
680
void __iomem *mapping;
drivers/pci/devres.c
686
mapping = pcim_iomap_region(pdev, bar, name);
drivers/pci/devres.c
687
if (IS_ERR(mapping)) {
drivers/pci/devres.c
688
ret = PTR_ERR(mapping);
drivers/pci/devres.c
691
ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
drivers/pci/devres.c
834
void __iomem *mapping;
drivers/pci/devres.c
844
mapping = pci_iomap_range(pdev, bar, offset, len);
drivers/pci/devres.c
845
if (!mapping) {
drivers/pci/devres.c
851
res->baseaddr = mapping;
drivers/pci/devres.c
859
return mapping;
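
The devres.c hits are the managed side of BAR mapping: pcim_iomap() records the mapping in a devres entry (and the legacy iomap table, line 472) so it is unwound automatically on driver detach. From a caller's perspective that reduces to this sketch, mapping BAR 0 for its whole length:

#include <linux/pci.h>

static int ex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        regs = pcim_iomap(pdev, 0, 0);  /* maxlen 0 = entire BAR */
        if (!regs)
                return -ENOMEM;

        /* no pci_iounmap() on the error/remove paths; devres handles it */
        return 0;
}
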
drivers/perf/arm-cci.c
1270
int mapping;
drivers/perf/arm-cci.c
1272
mapping = pmu_map_event(event);
drivers/perf/arm-cci.c
1274
if (mapping < 0) {
drivers/perf/arm-cci.c
1277
return mapping;
drivers/perf/arm-cci.c
1293
hwc->config_base |= (unsigned long)mapping;
drivers/perf/arm_pmu.c
159
int mapping;
drivers/perf/arm_pmu.c
167
mapping = (*event_map)[config];
drivers/perf/arm_pmu.c
168
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
drivers/perf/arm_pmu.c
461
int mapping, ret;
drivers/perf/arm_pmu.c
464
mapping = armpmu->map_event(event);
drivers/perf/arm_pmu.c
466
if (mapping < 0) {
drivers/perf/arm_pmu.c
469
return mapping;
drivers/perf/arm_pmu.c
495
hwc->config_base |= (unsigned long)mapping;
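
arm-cci.c and arm_pmu.c both resolve a perf event config through a lookup table, report unsupported slots as -ENOENT, and OR the successful result into hwc->config_base. A condensed sketch; the 0xffff sentinel is assumed from arm_pmu's HW_OP_UNSUPPORTED, and the real code indexes a fixed-size array rather than taking a length:

#define EX_OP_UNSUPPORTED 0xffff        /* assumed sentinel */

static int ex_map_event(const unsigned int *event_map, unsigned int config,
                        unsigned int nr_entries)
{
        int mapping;

        if (config >= nr_entries)
                return -ENOENT;
        mapping = event_map[config];
        return mapping == EX_OP_UNSUPPORTED ? -ENOENT : mapping;
}
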
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
4766
static void qmp_combo_find_lanes_orientation(const struct qmp_combo_lane_mapping *mapping,
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
4774
if (mapping[i].lanes_count != lanes_count)
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
4776
if (!memcmp(mapping[i].lanes, lanes, sizeof(u32) * lanes_count)) {
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
4777
*orientation = mapping[i].orientation;
drivers/power/supply/gpio-charger.c
58
struct gpio_mapping mapping;
drivers/power/supply/gpio-charger.c
78
mapping = gpio_charger->current_limit_map[i];
drivers/power/supply/gpio-charger.c
81
bool val = (mapping.gpiodata >> i) & 1;
drivers/power/supply/gpio-charger.c
86
gpio_charger->charge_current_limit = mapping.limit_ua;
drivers/rapidio/devices/rio_mport_cdev.c
1068
u64 size, struct rio_mport_mapping **mapping)
drivers/rapidio/devices/rio_mport_cdev.c
1091
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1101
struct rio_mport_mapping *mapping = NULL;
drivers/rapidio/devices/rio_mport_cdev.c
1107
ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1111
map.dma_handle = mapping->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1115
kref_put(&mapping->ref, mport_release_mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1182
struct rio_mport_mapping **mapping)
drivers/rapidio/devices/rio_mport_cdev.c
1218
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1232
struct rio_mport_mapping **mapping)
drivers/rapidio/devices/rio_mport_cdev.c
1246
*mapping = map;
drivers/rapidio/devices/rio_mport_cdev.c
1261
return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1269
struct rio_mport_mapping *mapping = NULL;
drivers/rapidio/devices/rio_mport_cdev.c
1280
map.length, &mapping);
drivers/rapidio/devices/rio_mport_cdev.c
1284
map.handle = mapping->phys_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1285
map.rio_addr = mapping->rio_addr;
drivers/rapidio/devices/rio_mport_cdev.c
1289
if (ret == 0 && mapping->filp == filp) {
drivers/rapidio/devices/rio_mport_cdev.c
1291
kref_put(&mapping->ref, mport_release_mapping);
drivers/regulator/qcom_spmi-regulator.c
1690
const struct spmi_regulator_mapping *mapping;
drivers/regulator/qcom_spmi-regulator.c
1716
mapping = &supported_regulators[i];
drivers/regulator/qcom_spmi-regulator.c
1717
if (mapping->type == type && mapping->subtype == subtype
drivers/regulator/qcom_spmi-regulator.c
1718
&& mapping->revision_min <= dig_major_rev
drivers/regulator/qcom_spmi-regulator.c
1719
&& mapping->revision_max >= dig_major_rev)
drivers/regulator/qcom_spmi-regulator.c
1730
vreg->logical_type = mapping->logical_type;
drivers/regulator/qcom_spmi-regulator.c
1731
vreg->set_points = mapping->set_points;
drivers/regulator/qcom_spmi-regulator.c
1732
vreg->hpm_min_load = mapping->hpm_min_load;
drivers/regulator/qcom_spmi-regulator.c
1733
vreg->desc.ops = mapping->ops;
drivers/regulator/qcom_spmi-regulator.c
1735
if (mapping->set_points) {
drivers/regulator/qcom_spmi-regulator.c
1736
if (!mapping->set_points->n_voltages)
drivers/regulator/qcom_spmi-regulator.c
1737
spmi_calculate_num_voltages(mapping->set_points);
drivers/regulator/qcom_spmi-regulator.c
1738
vreg->desc.n_voltages = mapping->set_points->n_voltages;
drivers/remoteproc/omap_remoteproc.c
1330
if (pdev->dev.archdata.mapping) {
drivers/remoteproc/omap_remoteproc.c
1331
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
drivers/remoteproc/omap_remoteproc.c
1334
arm_iommu_release_mapping(mapping);
drivers/remoteproc/remoteproc_core.c
619
struct rproc_mem_entry *mapping;
drivers/remoteproc/remoteproc_core.c
638
mapping = kzalloc_obj(*mapping);
drivers/remoteproc/remoteproc_core.c
639
if (!mapping)
drivers/remoteproc/remoteproc_core.c
656
mapping->da = rsc->da;
drivers/remoteproc/remoteproc_core.c
657
mapping->len = rsc->len;
drivers/remoteproc/remoteproc_core.c
658
list_add_tail(&mapping->node, &rproc->mappings);
drivers/remoteproc/remoteproc_core.c
666
kfree(mapping);
drivers/remoteproc/remoteproc_core.c
683
struct rproc_mem_entry *mapping = NULL;
drivers/remoteproc/remoteproc_core.c
730
mapping = kzalloc_obj(*mapping);
drivers/remoteproc/remoteproc_core.c
731
if (!mapping) {
drivers/remoteproc/remoteproc_core.c
750
mapping->da = mem->da;
drivers/remoteproc/remoteproc_core.c
751
mapping->len = mem->len;
drivers/remoteproc/remoteproc_core.c
752
list_add_tail(&mapping->node, &rproc->mappings);
drivers/remoteproc/remoteproc_core.c
772
kfree(mapping);
drivers/scsi/scsicam.c
35
struct address_space *mapping = dev->part0->bd_mapping;
drivers/scsi/scsicam.c
39
folio = read_mapping_folio(mapping, 0, NULL);
drivers/sh/clk/core.c
340
struct clk_mapping *mapping = clk->mapping;
drivers/sh/clk/core.c
345
if (!mapping) {
drivers/sh/clk/core.c
352
clk->mapping = &dummy_mapping;
drivers/sh/clk/core.c
361
mapping = clkp->mapping;
drivers/sh/clk/core.c
362
BUG_ON(!mapping);
drivers/sh/clk/core.c
368
if (!mapping->base && mapping->phys) {
drivers/sh/clk/core.c
369
kref_init(&mapping->ref);
drivers/sh/clk/core.c
371
mapping->base = ioremap(mapping->phys, mapping->len);
drivers/sh/clk/core.c
372
if (unlikely(!mapping->base))
drivers/sh/clk/core.c
374
} else if (mapping->base) {
drivers/sh/clk/core.c
378
kref_get(&mapping->ref);
drivers/sh/clk/core.c
381
clk->mapping = mapping;
drivers/sh/clk/core.c
383
clk->mapped_reg = clk->mapping->base;
drivers/sh/clk/core.c
384
clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
drivers/sh/clk/core.c
390
struct clk_mapping *mapping;
drivers/sh/clk/core.c
392
mapping = container_of(kref, struct clk_mapping, ref);
drivers/sh/clk/core.c
394
iounmap(mapping->base);
drivers/sh/clk/core.c
399
struct clk_mapping *mapping = clk->mapping;
drivers/sh/clk/core.c
402
if (mapping == &dummy_mapping)
drivers/sh/clk/core.c
405
kref_put(&mapping->ref, clk_destroy_mapping);
drivers/sh/clk/core.c
406
clk->mapping = NULL;
drivers/sh/clk/cpg.c
402
value = __raw_readl(clk->mapping->base);
drivers/sh/clk/cpg.c
418
__raw_writel(0, clk->mapping->base);
drivers/sh/clk/cpg.c
425
value = __raw_readl(clk->mapping->base) >> 16;
drivers/sh/clk/cpg.c
429
__raw_writel((value << 16) | 0x3, clk->mapping->base);
drivers/sh/clk/cpg.c
440
__raw_writel(0, clk->mapping->base);
drivers/sh/clk/cpg.c
442
__raw_writel(idx << 16, clk->mapping->base);
drivers/sh/clk/cpg.c
474
clks[i].mapping = map;
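
sh/clk shares one ioremap() window between clocks through a kref (core.c:368-405): the first user initializes the count and maps, later users only take a reference, and the kref release callback unmaps when the last one drops. A minimal sketch of that lifecycle with a reduced struct:

#include <linux/io.h>
#include <linux/kref.h>

struct ex_mapping {
        phys_addr_t phys;
        void __iomem *base;
        unsigned long len;
        struct kref ref;
};

static void ex_destroy_mapping(struct kref *kref)
{
        struct ex_mapping *m = container_of(kref, struct ex_mapping, ref);

        iounmap(m->base);               /* last user gone */
}

static int ex_get_mapping(struct ex_mapping *m)
{
        if (!m->base && m->phys) {
                kref_init(&m->ref);     /* first user: count = 1 */
                m->base = ioremap(m->phys, m->len);
                if (!m->base)
                        return -ENXIO;
        } else if (m->base) {
                kref_get(&m->ref);      /* share the existing window */
        }
        return 0;
}

static void ex_put_mapping(struct ex_mapping *m)
{
        kref_put(&m->ref, ex_destroy_mapping);
}
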
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
190
const struct mapping_table *mapping = dlvr_mapping;\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
213
err = get_mapped_string(mapping, attr->attr.name, ret, &str);\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
227
const struct mapping_table *mapping = dlvr_mapping;\
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
252
err = get_mapped_value(mapping, attr->attr.name, buf, &input);\
drivers/vfio/pci/nvgrace-gpu/main.c
161
region->pfn_address_space.mapping = core_vdev->inode->i_mapping;
drivers/video/fbdev/core/fb_defio.c
139
BUG_ON(!info->fbdefio->mapping);
drivers/video/fbdev/core/fb_defio.c
274
mapping_wrprotect_range(fbdefio->mapping, pgoff,
drivers/video/fbdev/core/fb_defio.c
332
fbdefio->mapping = file->f_mapping;
drivers/video/fbdev/core/fb_defio.c
360
fbdefio->mapping = NULL;
fs/adfs/inode.c
37
static int adfs_writepages(struct address_space *mapping,
fs/adfs/inode.c
40
return mpage_writepages(mapping, wbc, adfs_get_block);
fs/adfs/inode.c
48
static void adfs_write_failed(struct address_space *mapping, loff_t to)
fs/adfs/inode.c
50
struct inode *inode = mapping->host;
fs/adfs/inode.c
57
struct address_space *mapping,
fs/adfs/inode.c
63
ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
fs/adfs/inode.c
65
&ADFS_I(mapping->host)->mmu_private);
fs/adfs/inode.c
67
adfs_write_failed(mapping, pos + len);
fs/adfs/inode.c
72
static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
fs/adfs/inode.c
74
return generic_block_bmap(mapping, block, adfs_get_block);
fs/affs/file.c
375
static int affs_writepages(struct address_space *mapping,
fs/affs/file.c
378
return mpage_writepages(mapping, wbc, affs_get_block);
fs/affs/file.c
386
static void affs_write_failed(struct address_space *mapping, loff_t to)
fs/affs/file.c
388
struct inode *inode = mapping->host;
fs/affs/file.c
400
struct address_space *mapping = file->f_mapping;
fs/affs/file.c
401
struct inode *inode = mapping->host;
fs/affs/file.c
415
affs_write_failed(mapping, offset + count);
fs/affs/file.c
420
struct address_space *mapping,
fs/affs/file.c
426
ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
fs/affs/file.c
428
&AFFS_I(mapping->host)->mmu_private);
fs/affs/file.c
430
affs_write_failed(mapping, pos + len);
fs/affs/file.c
436
struct address_space *mapping, loff_t pos,
fs/affs/file.c
440
struct inode *inode = mapping->host;
fs/affs/file.c
443
ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/affs/file.c
454
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
fs/affs/file.c
456
return generic_block_bmap(mapping,block,affs_get_block);
fs/affs/file.c
530
struct inode *inode = folio->mapping->host;
fs/affs/file.c
633
struct inode *inode = folio->mapping->host;
fs/affs/file.c
652
struct address_space *mapping,
fs/affs/file.c
656
struct inode *inode = mapping->host;
fs/affs/file.c
673
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/affs/file.c
674
mapping_gfp_mask(mapping));
fs/affs/file.c
692
struct address_space *mapping,
fs/affs/file.c
696
struct inode *inode = mapping->host;
fs/affs/file.c
888
struct address_space *mapping = inode->i_mapping;
fs/affs/file.c
894
res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, &folio, &fsdata);
fs/affs/file.c
896
res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, folio, fsdata);
fs/affs/symlink.c
17
struct inode *inode = folio->mapping->host;
fs/afs/dir.c
2197
int afs_single_writepages(struct address_space *mapping,
fs/afs/dir.c
2200
struct afs_vnode *dvnode = AFS_FS_I(mapping->host);
fs/afs/dir.c
2216
ret = netfs_writeback_single(mapping, wbc, &iter);
fs/afs/internal.h
1107
int afs_single_writepages(struct address_space *mapping,
fs/afs/write.c
229
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/afs/write.c
231
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
fs/afs/write.c
243
ret = netfs_writepages(mapping, wbc);
fs/aio.c
407
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
fs/aio.c
416
spin_lock(&mapping->i_private_lock);
fs/aio.c
417
ctx = mapping->i_private_data;
fs/aio.c
447
rc = folio_migrate_mapping(mapping, dst, src, 1);
fs/aio.c
470
spin_unlock(&mapping->i_private_lock);
fs/befs/linuxvfs.c
119
befs_bmap(struct address_space *mapping, sector_t block)
fs/befs/linuxvfs.c
121
return generic_block_bmap(mapping, block, befs_get_block);
fs/befs/linuxvfs.c
46
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
fs/befs/linuxvfs.c
473
struct inode *inode = folio->mapping->host;
fs/bfs/file.c
154
static int bfs_writepages(struct address_space *mapping,
fs/bfs/file.c
157
return mpage_writepages(mapping, wbc, bfs_get_block);
fs/bfs/file.c
165
static void bfs_write_failed(struct address_space *mapping, loff_t to)
fs/bfs/file.c
167
struct inode *inode = mapping->host;
fs/bfs/file.c
174
struct address_space *mapping,
fs/bfs/file.c
180
ret = block_write_begin(mapping, pos, len, foliop, bfs_get_block);
fs/bfs/file.c
182
bfs_write_failed(mapping, pos + len);
fs/bfs/file.c
187
static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
fs/bfs/file.c
189
return generic_block_bmap(mapping, block, bfs_get_block);
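
adfs, affs and bfs (and exfat/ext2 further down) pair write_begin with the same failure path: if block allocation extended the file past i_size before the copy failed, the pagecache and the on-disk blocks are trimmed back. A generic sketch; ex_truncate_blocks() is a hypothetical stand-in for the per-filesystem block truncate:

#include <linux/fs.h>
#include <linux/mm.h>

static void ex_truncate_blocks(struct inode *inode, loff_t size);      /* hypothetical */

static void ex_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                ex_truncate_blocks(inode, inode->i_size);
        }
}
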
fs/btrfs/compression.c
382
struct address_space *mapping = inode->i_mapping;
fs/btrfs/compression.c
417
folio = filemap_get_folio(mapping, pg_index);
fs/btrfs/compression.c
438
folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS),
fs/btrfs/compression.c
443
if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
fs/btrfs/compression.c
968
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
fs/btrfs/compression.c
977
in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
fs/btrfs/compression.c
979
struct btrfs_inode *inode = BTRFS_I(mapping->host);
fs/btrfs/compression.h
138
int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
fs/btrfs/defrag.c
847
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/defrag.c
848
gfp_t mask = btrfs_alloc_write_mask(mapping);
fs/btrfs/defrag.c
857
folio = __filemap_get_folio(mapping, index,
fs/btrfs/defrag.c
906
if (folio->mapping != mapping || !folio->private) {
fs/btrfs/defrag.c
920
if (folio->mapping != mapping || !folio->private) {
fs/btrfs/disk-io.c
3779
struct address_space *mapping = device->bdev->bd_mapping;
fs/btrfs/disk-io.c
3815
folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
fs/btrfs/disk-io.c
458
static int btree_migrate_folio(struct address_space *mapping,
fs/btrfs/disk-io.c
474
return migrate_folio(mapping, dst, src, mode);
fs/btrfs/disk-io.c
505
static bool btree_dirty_folio(struct address_space *mapping,
fs/btrfs/disk-io.c
508
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
fs/btrfs/disk-io.c
521
return filemap_dirty_folio(mapping, folio);
fs/btrfs/disk-io.c
550
return filemap_dirty_folio(mapping, folio);
fs/btrfs/extent_io.c
1001
struct inode *inode = folio->mapping->host;
fs/btrfs/extent_io.c
1338
struct inode *vfs_inode = folio->mapping->host;
fs/btrfs/extent_io.c
1837
struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
fs/btrfs/extent_io.c
1914
mapping_set_error(folio->mapping, ret);
fs/btrfs/extent_io.c
2318
int btree_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/btrfs/extent_io.c
2321
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
fs/btrfs/extent_io.c
2334
index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits);
fs/btrfs/extent_io.c
2455
static int extent_write_cache_pages(struct address_space *mapping,
fs/btrfs/extent_io.c
2459
struct inode *inode = mapping->host;
fs/btrfs/extent_io.c
2486
index = mapping->writeback_index; /* Start from prev offset */
fs/btrfs/extent_io.c
2516
tag_pages_for_writeback(mapping, index, end);
fs/btrfs/extent_io.c
2519
(nr_folios = filemap_get_folios_tag(mapping, &index,
fs/btrfs/extent_io.c
2537
if (unlikely(folio->mapping != mapping)) {
fs/btrfs/extent_io.c
2616
mapping->writeback_index = done_index;
fs/btrfs/extent_io.c
2633
struct address_space *mapping = inode->i_mapping;
fs/btrfs/extent_io.c
265
static void __process_folios_contig(struct address_space *mapping,
fs/btrfs/extent_io.c
2653
folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
fs/btrfs/extent_io.c
2664
mapping_set_error(mapping, PTR_ERR(folio));
fs/btrfs/extent_io.c
2687
mapping_set_error(mapping, ret);
fs/btrfs/extent_io.c
269
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
fs/btrfs/extent_io.c
2699
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/btrfs/extent_io.c
2701
struct inode *inode = mapping->host;
fs/btrfs/extent_io.c
2713
ret = extent_write_cache_pages(mapping, &bio_ctrl);
fs/btrfs/extent_io.c
2727
struct inode *vfs_inode = rac->mapping->host;
fs/btrfs/extent_io.c
279
found_folios = filemap_get_folios_contig(mapping, &index,
fs/btrfs/extent_io.c
2919
lockdep_assert_held(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
2932
struct address_space *mapping = folio->mapping;
fs/btrfs/extent_io.c
2940
spin_lock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
2944
spin_unlock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
2963
spin_unlock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
2986
spin_unlock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
307
struct address_space *mapping = inode->i_mapping;
fs/btrfs/extent_io.c
317
found_folios = filemap_get_folios_contig(mapping, &index,
fs/btrfs/extent_io.c
3257
lockdep_assert_held(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
331
if (!folio_test_dirty(folio) || folio->mapping != mapping) {
fs/btrfs/extent_io.c
3333
struct address_space *mapping = fs_info->btree_inode->i_mapping;
fs/btrfs/extent_io.c
3345
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
fs/btrfs/extent_io.c
3350
existing_folio = filemap_lock_folio(mapping, index + i);
fs/btrfs/extent_io.c
3365
spin_lock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
3377
spin_unlock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
3401
spin_unlock(&mapping->i_private_lock);
fs/btrfs/extent_io.c
3716
xa_lock_irq(&folio->mapping->i_pages);
fs/btrfs/extent_io.c
3718
__xa_clear_mark(&folio->mapping->i_pages, folio->index,
fs/btrfs/extent_io.c
3720
xa_unlock_irq(&folio->mapping->i_pages);
fs/btrfs/extent_io.c
4528
spin_lock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
4533
spin_unlock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
4548
spin_lock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
4550
spin_unlock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
4565
spin_unlock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
4568
spin_unlock(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
544
mapping_set_error(folio->mapping, error);
fs/btrfs/extent_io.c
882
if (folio->mapping)
fs/btrfs/extent_io.c
883
lockdep_assert_held(&folio->mapping->i_private_lock);
fs/btrfs/extent_io.c
912
ASSERT(folio->mapping);
fs/btrfs/extent_io.c
930
ASSERT(folio->mapping);
fs/btrfs/extent_io.h
239
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
fs/btrfs/extent_io.h
240
int btree_writepages(struct address_space *mapping, struct writeback_control *wbc);
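
extent_write_cache_pages() (extent_io.c:2455 onward) has the generic tagged-writeback shape: resume from writeback_index, tag the range, pull dirty folios in batches, and recheck folio->mapping under the folio lock to catch truncation races (line 2537). A stripped-down sketch of that loop; real callers tag PAGECACHE_TAG_TOWRITE only for data-integrity sync:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void ex_walk_dirty(struct address_space *mapping, pgoff_t index,
                          pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned int i, nr;

        folio_batch_init(&fbatch);
        tag_pages_for_writeback(mapping, index, end);
        while ((nr = filemap_get_folios_tag(mapping, &index, end,
                                            PAGECACHE_TAG_TOWRITE, &fbatch))) {
                for (i = 0; i < nr; i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_lock(folio);
                        if (folio->mapping != mapping) {
                                folio_unlock(folio);    /* raced with truncate */
                                continue;
                        }
                        /* ... write the folio ... */
                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
        }
}
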
fs/btrfs/file.c
1922
if ((folio->mapping != inode->vfs_inode.i_mapping) ||
fs/btrfs/file.c
2044
struct address_space *mapping = filp->f_mapping;
fs/btrfs/file.c
2048
if (!mapping->a_ops->read_folio)
fs/btrfs/file.c
3874
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/file.c
3891
ret = filemap_fdatawrite_range(mapping, start, end);
fs/btrfs/file.c
3893
ret = filemap_fdatawrite_range(mapping, start, end);
fs/btrfs/file.c
831
if (folio->mapping != inode->i_mapping || !folio_test_private(folio)) {
fs/btrfs/free-space-cache.c
476
if (folio->mapping != inode->i_mapping) {
fs/btrfs/fs.h
962
struct folio *: (_folio))->mapping->host))
fs/btrfs/fs.h
969
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
fs/btrfs/fs.h
971
return mapping_gfp_constraint(mapping, ~__GFP_FS);
fs/btrfs/inode.c
1060
mapping_set_error(mapping, -EIO);
fs/btrfs/inode.c
2880
if (!folio->mapping || !folio_test_dirty(folio) ||
fs/btrfs/inode.c
2958
mapping_set_error(folio->mapping, ret);
fs/btrfs/inode.c
2989
struct inode *inode = folio->mapping->host;
fs/btrfs/inode.c
4990
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/inode.c
4997
folio = filemap_lock_folio(mapping, index);
fs/btrfs/inode.c
5005
if (folio->mapping != mapping) {
fs/btrfs/inode.c
5053
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/inode.c
5062
gfp_t mask = btrfs_alloc_write_mask(mapping);
fs/btrfs/inode.c
5133
folio = __filemap_get_folio(mapping, index,
fs/btrfs/inode.c
5149
if (folio->mapping != mapping) {
fs/btrfs/inode.c
7652
static int btrfs_migrate_folio(struct address_space *mapping,
fs/btrfs/inode.c
7656
int ret = filemap_migrate_folio(mapping, dst, src, mode);
fs/btrfs/inode.c
938
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/lzo.c
286
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/lzo.c
317
ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
fs/btrfs/ordered-data.c
360
ASSERT(folio->mapping);
fs/btrfs/reflink.c
69
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/reflink.c
85
folio = __filemap_get_folio(mapping, file_offset >> PAGE_SHIFT,
fs/btrfs/reflink.c
87
btrfs_alloc_write_mask(mapping));
fs/btrfs/relocation.c
2832
if (folio->mapping != inode->i_mapping) {
fs/btrfs/send.c
5205
struct address_space *mapping = sctx->cur_inode->i_mapping;
fs/btrfs/send.c
5218
folio = filemap_lock_folio(mapping, index);
fs/btrfs/send.c
5220
page_cache_sync_readahead(mapping,
fs/btrfs/send.c
5224
folio = filemap_grab_folio(mapping, index);
fs/btrfs/send.c
5234
page_cache_async_readahead(mapping, &sctx->ra, NULL, folio,
fs/btrfs/send.c
5250
if (folio->mapping != mapping) {
fs/btrfs/subpage.c
155
ASSERT(folio_test_private(folio) && folio->mapping);
fs/btrfs/subpage.c
156
lockdep_assert_held(&folio->mapping->i_private_lock);
fs/btrfs/subpage.c
169
ASSERT(folio_test_private(folio) && folio->mapping);
fs/btrfs/subpage.c
170
lockdep_assert_held(&folio->mapping->i_private_lock);
fs/btrfs/subpage.c
188
if (folio->mapping)
fs/btrfs/subpage.c
79
if (folio->mapping)
fs/btrfs/subpage.h
110
if (folio->mapping && folio->mapping->host)
fs/btrfs/subpage.h
111
ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
fs/btrfs/transaction.c
1136
struct address_space *mapping = fs_info->btree_inode->i_mapping;
fs/btrfs/transaction.c
1166
ret = filemap_fdatawrite_range(mapping, start, end);
fs/btrfs/volumes.c
1345
struct address_space *mapping = bdev->bd_mapping;
fs/btrfs/volumes.c
1367
invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT,
fs/btrfs/volumes.c
1371
filemap_invalidate_lock(mapping);
fs/btrfs/volumes.c
1372
page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
fs/btrfs/volumes.c
1373
filemap_invalidate_unlock(mapping);
fs/btrfs/zlib.c
115
static int copy_data_into_buffer(struct address_space *mapping,
fs/btrfs/zlib.c
131
ret = btrfs_compress_filemap_get_folio(mapping, cur, &folio);
fs/btrfs/zlib.c
153
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/zlib.c
205
ret = copy_data_into_buffer(mapping, workspace,
fs/btrfs/zlib.c
220
ret = btrfs_compress_filemap_get_folio(mapping,
fs/btrfs/zoned.c
125
struct address_space *mapping = bdev->bd_mapping;
fs/btrfs/zoned.c
134
page[i] = read_cache_page_gfp(mapping,
fs/btrfs/zstd.c
404
struct address_space *mapping = inode->vfs_inode.i_mapping;
fs/btrfs/zstd.c
434
ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
fs/btrfs/zstd.c
512
ret = btrfs_compress_filemap_get_folio(mapping, cur, &in_folio);
fs/buffer.c
1042
struct address_space *mapping = bdev->bd_mapping;
fs/buffer.c
1047
folio = __filemap_get_folio(mapping, index,
fs/buffer.c
1081
spin_lock(&mapping->i_private_lock);
fs/buffer.c
1084
spin_unlock(&mapping->i_private_lock);
fs/buffer.c
1198
struct address_space *mapping = NULL;
fs/buffer.c
1201
mapping = folio->mapping;
fs/buffer.c
1202
if (mapping)
fs/buffer.c
1203
__folio_mark_dirty(folio, mapping, 0);
fs/buffer.c
1205
if (mapping)
fs/buffer.c
1206
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
fs/buffer.c
1215
if (bh->b_folio && bh->b_folio->mapping)
fs/buffer.c
1216
mapping_set_error(bh->b_folio->mapping, -EIO);
fs/buffer.c
1249
struct address_space *buffer_mapping = bh->b_folio->mapping;
fs/buffer.c
1697
spin_lock(&folio->mapping->i_private_lock);
fs/buffer.c
1709
spin_unlock(&folio->mapping->i_private_lock);
fs/buffer.c
1979
mapping_set_error(folio->mapping, err);
fs/buffer.c
2112
struct inode *inode = folio->mapping->host;
fs/buffer.c
2241
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
fs/buffer.c
2248
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/buffer.c
2249
mapping_gfp_mask(mapping));
fs/buffer.c
2297
int generic_write_end(const struct kiocb *iocb, struct address_space *mapping,
fs/buffer.c
2301
struct inode *inode = mapping->host;
fs/buffer.c
2388
struct inode *inode = folio->mapping->host;
fs/buffer.c
2472
struct address_space *mapping = inode->i_mapping;
fs/buffer.c
2473
const struct address_space_operations *aops = mapping->a_ops;
fs/buffer.c
2482
err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
fs/buffer.c
2486
err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
fs/buffer.c
2495
struct address_space *mapping,
fs/buffer.c
2498
struct inode *inode = mapping->host;
fs/buffer.c
2499
const struct address_space_operations *aops = mapping->a_ops;
fs/buffer.c
2519
err = aops->write_begin(iocb, mapping, curpos, len,
fs/buffer.c
2524
err = aops->write_end(iocb, mapping, curpos, len, len,
fs/buffer.c
2531
balance_dirty_pages_ratelimited(mapping);
fs/buffer.c
2552
err = aops->write_begin(iocb, mapping, curpos, len,
fs/buffer.c
2557
err = aops->write_end(iocb, mapping, curpos, len, len,
fs/buffer.c
2572
int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping,
fs/buffer.c
2576
struct inode *inode = mapping->host;
fs/buffer.c
2581
err = cont_expand_zero(iocb, mapping, pos, bytes);
fs/buffer.c
2591
return block_write_begin(mapping, pos, len, foliop, get_block);
fs/buffer.c
2624
if ((folio->mapping != inode->i_mapping) ||
fs/buffer.c
2651
int block_truncate_page(struct address_space *mapping,
fs/buffer.c
2658
struct inode *inode = mapping->host;
fs/buffer.c
2673
folio = filemap_grab_folio(mapping, index);
fs/buffer.c
2728
struct inode * const inode = folio->mapping->host;
fs/buffer.c
2753
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
fs/buffer.c
2756
struct inode *inode = mapping->host;
fs/buffer.c
2940
struct address_space * const mapping = folio->mapping;
fs/buffer.c
2952
if (mapping == NULL) { /* can this still happen? */
fs/buffer.c
2957
spin_lock(&mapping->i_private_lock);
fs/buffer.c
2976
spin_unlock(&mapping->i_private_lock);
fs/buffer.c
351
struct inode *inode = bh->b_folio->mapping->host;
fs/buffer.c
574
int sync_mapping_buffers(struct address_space *mapping)
fs/buffer.c
576
struct address_space *buffer_mapping = mapping->i_private_data;
fs/buffer.c
578
if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
fs/buffer.c
582
&mapping->i_private_list);
fs/buffer.c
677
struct address_space *mapping = inode->i_mapping;
fs/buffer.c
678
struct address_space *buffer_mapping = bh->b_folio->mapping;
fs/buffer.c
681
if (!mapping->i_private_data) {
fs/buffer.c
682
mapping->i_private_data = buffer_mapping;
fs/buffer.c
684
BUG_ON(mapping->i_private_data != buffer_mapping);
fs/buffer.c
689
&mapping->i_private_list);
fs/buffer.c
690
bh->b_assoc_map = mapping;
fs/buffer.c
728
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
fs/buffer.c
733
spin_lock(&mapping->i_private_lock);
fs/buffer.c
748
spin_unlock(&mapping->i_private_lock);
fs/buffer.c
751
__folio_mark_dirty(folio, mapping, 1);
fs/buffer.c
754
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
fs/buffer.c
782
struct address_space *mapping;
fs/buffer.c
792
mapping = bh->b_assoc_map;
fs/buffer.c
799
bh->b_assoc_map = mapping;
fs/buffer.c
831
mapping = bh->b_assoc_map;
fs/buffer.c
838
&mapping->i_private_list);
fs/buffer.c
839
bh->b_assoc_map = mapping;
fs/buffer.c
868
struct address_space *mapping = &inode->i_data;
fs/buffer.c
869
struct list_head *list = &mapping->i_private_list;
fs/buffer.c
870
struct address_space *buffer_mapping = mapping->i_private_data;
fs/buffer.c
891
struct address_space *mapping = &inode->i_data;
fs/buffer.c
892
struct list_head *list = &mapping->i_private_list;
fs/buffer.c
893
struct address_space *buffer_mapping = mapping->i_private_data;
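
buffer.c:2482-2486 (like the affs and exfat callers earlier) drives a zero-length write through the aops pair purely to instantiate and dirty a folio at EOF when extending a file. The calling convention, sketched:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int ex_extend_to(struct inode *inode, loff_t size)
{
        struct address_space *mapping = inode->i_mapping;
        const struct address_space_operations *aops = mapping->a_ops;
        struct folio *folio;
        void *fsdata = NULL;
        int err;

        err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
        if (err)
                return err;
        /* a write_end with copied == 0 unlocks and releases the folio */
        return aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
}
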
fs/ceph/addr.c
1001
struct inode *inode = mapping->host;
fs/ceph/addr.c
1027
void ceph_init_writeback_ctl(struct address_space *mapping,
fs/ceph/addr.c
1035
ceph_wbc->wsize = ceph_define_write_size(mapping);
fs/ceph/addr.c
1045
ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
fs/ceph/addr.c
1064
int ceph_define_writeback_range(struct address_space *mapping,
fs/ceph/addr.c
1068
struct inode *inode = mapping->host;
fs/ceph/addr.c
1134
int ceph_check_page_before_write(struct address_space *mapping,
fs/ceph/addr.c
1139
struct inode *inode = mapping->host;
fs/ceph/addr.c
1145
if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
fs/ceph/addr.c
1198
void ceph_allocate_page_array(struct address_space *mapping,
fs/ceph/addr.c
1202
struct inode *inode = mapping->host;
fs/ceph/addr.c
1245
static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
fs/ceph/addr.c
1249
struct inode *inode = mapping->host;
fs/ceph/addr.c
1285
void ceph_process_folio_batch(struct address_space *mapping,
fs/ceph/addr.c
1289
struct inode *inode = mapping->host;
fs/ceph/addr.c
132
return ceph_fscache_dirty_folio(mapping, folio);
fs/ceph/addr.c
1321
rc = ceph_check_page_before_write(mapping, wbc,
fs/ceph/addr.c
1346
ceph_allocate_page_array(mapping, ceph_wbc, folio);
fs/ceph/addr.c
1365
rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
fs/ceph/addr.c
1401
int ceph_submit_write(struct address_space *mapping,
fs/ceph/addr.c
1405
struct inode *inode = mapping->host;
fs/ceph/addr.c
143
struct inode *inode = folio->mapping->host;
fs/ceph/addr.c
1588
void ceph_wait_until_current_writes_complete(struct address_space *mapping,
fs/ceph/addr.c
1601
(nr = filemap_get_folios_tag(mapping,
fs/ceph/addr.c
1622
static int ceph_writepages_start(struct address_space *mapping,
fs/ceph/addr.c
1625
struct inode *inode = mapping->host;
fs/ceph/addr.c
1638
if (is_forced_umount(mapping)) {
fs/ceph/addr.c
1643
ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
fs/ceph/addr.c
1651
rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
fs/ceph/addr.c
1660
tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
fs/ceph/addr.c
1671
ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
fs/ceph/addr.c
1683
ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
fs/ceph/addr.c
1698
rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
fs/ceph/addr.c
1735
ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
fs/ceph/addr.c
1743
mapping->writeback_index = ceph_wbc.index;
fs/ceph/addr.c
1783
struct inode *inode = folio->mapping->host;
fs/ceph/addr.c
1859
struct address_space *mapping,
fs/ceph/addr.c
1882
struct address_space *mapping, loff_t pos,
fs/ceph/addr.c
2000
struct address_space *mapping = inode->i_mapping;
fs/ceph/addr.c
2003
filemap_invalidate_lock_shared(mapping);
fs/ceph/addr.c
2004
page = find_or_create_page(mapping, 0,
fs/ceph/addr.c
2005
mapping_gfp_constraint(mapping, ~__GFP_FS));
fs/ceph/addr.c
2026
filemap_invalidate_unlock_shared(mapping);
fs/ceph/addr.c
2146
struct address_space *mapping = inode->i_mapping;
fs/ceph/addr.c
2154
page = find_or_create_page(mapping, 0,
fs/ceph/addr.c
2155
mapping_gfp_constraint(mapping,
fs/ceph/addr.c
2350
struct address_space *mapping = desc->file->f_mapping;
fs/ceph/addr.c
2352
if (!mapping->a_ops->read_folio)
fs/ceph/addr.c
721
struct inode *inode = folio->mapping->host;
fs/ceph/addr.c
81
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
fs/ceph/addr.c
83
struct inode *inode = mapping->host;
fs/ceph/addr.c
884
struct address_space *mapping = inode->i_mapping;
fs/ceph/addr.c
892
mapping_set_error(mapping, rc);
fs/ceph/addr.c
978
bool is_forced_umount(struct address_space *mapping)
fs/ceph/addr.c
980
struct inode *inode = mapping->host;
fs/ceph/addr.c
991
mapping_set_error(mapping, -EIO);
fs/ceph/addr.c
999
unsigned int ceph_define_write_size(struct address_space *mapping)
fs/coda/symlink.c
25
struct inode *inode = folio->mapping->host;
fs/cramfs/inode.c
195
struct address_space *mapping = sb->s_bdev->bd_mapping;
fs/cramfs/inode.c
226
file_ra_state_init(&ra, mapping);
fs/cramfs/inode.c
227
page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);
fs/cramfs/inode.c
233
page = read_mapping_page(mapping, blocknr + i, NULL);
fs/cramfs/inode.c
823
struct inode *inode = folio->mapping->host;
fs/crypto/crypto.c
175
const struct inode *inode = folio->mapping->host;
fs/crypto/crypto.c
258
const struct inode *inode = folio->mapping->host;
fs/crypto/inline_crypt.c
323
const struct address_space *mapping;
fs/crypto/inline_crypt.c
330
mapping = folio_mapping(folio);
fs/crypto/inline_crypt.c
331
if (!mapping)
fs/crypto/inline_crypt.c
333
inode = mapping->host;
fs/dax.c
1046
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/dax.c
1053
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
fs/dax.c
1059
unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
fs/dax.c
1062
unmap_mapping_pages(mapping, index, 1, false);
fs/dax.c
1070
dax_disassociate_entry(entry, mapping, false);
fs/dax.c
1071
dax_associate_entry(new_entry, mapping, vmf->vma,
fs/dax.c
1101
struct address_space *mapping, void *entry)
fs/dax.c
1166
i_mmap_lock_read(mapping);
fs/dax.c
1167
vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
fs/dax.c
1171
i_mmap_unlock_read(mapping);
fs/dax.c
1186
trace_dax_writeback_one(mapping->host, index, count);
fs/dax.c
1199
int dax_writeback_mapping_range(struct address_space *mapping,
fs/dax.c
1202
XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
fs/dax.c
1203
struct inode *inode = mapping->host;
fs/dax.c
1212
if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
fs/dax.c
1217
tag_pages_for_writeback(mapping, xas.xa_index, end_index);
fs/dax.c
1221
ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
fs/dax.c
1223
mapping_set_error(mapping, ret);
fs/dax.c
1377
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/dax.c
1378
struct inode *inode = mapping->host;
fs/dax.c
1865
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/dax.c
1866
XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
fs/dax.c
1868
.inode = mapping->host,
fs/dax.c
1891
entry = grab_mapping_entry(&xas, mapping, 0);
fs/dax.c
1975
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/dax.c
1976
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
fs/dax.c
1978
.inode = mapping->host,
fs/dax.c
2012
entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
fs/dax.c
2096
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/dax.c
2097
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
fs/dax.c
2109
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
fs/dax.c
2128
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
fs/dax.c
353
return !folio->mapping && folio->share;
fs/dax.c
372
folio->mapping = NULL;
fs/dax.c
394
folio->mapping = NULL;
fs/dax.c
408
new_folio->mapping = NULL;
fs/dax.c
441
static void dax_associate_entry(void *entry, struct address_space *mapping,
fs/dax.c
452
if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
fs/dax.c
453
if (folio->mapping)
fs/dax.c
460
WARN_ON_ONCE(folio->mapping);
fs/dax.c
463
folio->mapping = mapping;
fs/dax.c
468
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
fs/dax.c
508
struct address_space *mapping = READ_ONCE(folio->mapping);
fs/dax.c
511
if (!mapping || !dax_mapping(mapping))
fs/dax.c
522
if (S_ISCHR(mapping->host->i_mode))
fs/dax.c
525
xas.xa = &mapping->i_pages;
fs/dax.c
527
if (mapping != folio->mapping) {
fs/dax.c
549
struct address_space *mapping = folio->mapping;
fs/dax.c
550
XA_STATE(xas, &mapping->i_pages, folio->index);
fs/dax.c
552
if (S_ISCHR(mapping->host->i_mode))
fs/dax.c
567
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
fs/dax.c
576
if (!dax_mapping(mapping))
fs/dax.c
579
xas.xa = &mapping->i_pages;
fs/dax.c
610
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
fs/dax.c
613
XA_STATE(xas, &mapping->i_pages, index);
fs/dax.c
651
struct address_space *mapping, unsigned int order)
fs/dax.c
693
unmap_mapping_pages(mapping,
fs/dax.c
700
dax_disassociate_entry(entry, mapping, false);
fs/dax.c
703
mapping->nrpages -= PG_PMD_NR;
fs/dax.c
719
mapping->nrpages += 1UL << order;
fs/dax.c
724
if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
fs/dax.c
754
struct page *dax_layout_busy_page_range(struct address_space *mapping,
fs/dax.c
762
XA_STATE(xas, &mapping->i_pages, start_idx);
fs/dax.c
764
if (!dax_mapping(mapping))
fs/dax.c
784
unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
fs/dax.c
809
struct page *dax_layout_busy_page(struct address_space *mapping)
fs/dax.c
811
return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
fs/dax.c
815
static int __dax_invalidate_entry(struct address_space *mapping,
fs/dax.c
818
XA_STATE(xas, &mapping->i_pages, index);
fs/dax.c
830
dax_disassociate_entry(entry, mapping, trunc);
fs/dax.c
832
mapping->nrpages -= 1UL << dax_entry_order(entry);
fs/dax.c
840
static int __dax_clear_dirty_range(struct address_space *mapping,
fs/dax.c
843
XA_STATE(xas, &mapping->i_pages, start);
fs/dax.c
873
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
fs/dax.c
875
int ret = __dax_invalidate_entry(mapping, index, true);
fs/dax.c
888
void dax_delete_mapping_range(struct address_space *mapping,
fs/dax.c
894
XA_STATE(xas, &mapping->i_pages, start_idx);
fs/dax.c
909
dax_disassociate_entry(entry, mapping, true);
fs/dax.c
911
mapping->nrpages -= 1UL << dax_entry_order(entry);
fs/dax.c
992
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
fs/dax.c
995
return __dax_invalidate_entry(mapping, index, false);
fs/direct-io.c
1160
struct address_space *mapping = iocb->ki_filp->f_mapping;
fs/direct-io.c
1162
retval = filemap_write_and_wait_range(mapping, offset, end - 1);
fs/ecryptfs/crypto.c
360
ecryptfs_inode = folio->mapping->host;
fs/ecryptfs/crypto.c
429
ecryptfs_inode = folio->mapping->host;
fs/ecryptfs/mmap.c
117
page_virt, folio->mapping->host);
fs/ecryptfs/mmap.c
139
crypt_stat->extent_size, folio->mapping->host);
fs/ecryptfs/mmap.c
165
struct inode *inode = folio->mapping->host;
fs/ecryptfs/mmap.c
216
struct inode *inode = folio->mapping->host;
fs/ecryptfs/mmap.c
243
struct address_space *mapping,
fs/ecryptfs/mmap.c
252
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/ecryptfs/mmap.c
253
mapping_gfp_mask(mapping));
fs/ecryptfs/mmap.c
261
&ecryptfs_inode_to_private(mapping->host)->crypt_stat;
fs/ecryptfs/mmap.c
265
folio, index, 0, PAGE_SIZE, mapping->host);
fs/ecryptfs/mmap.c
292
mapping->host);
fs/ecryptfs/mmap.c
30
static int ecryptfs_writepages(struct address_space *mapping,
fs/ecryptfs/mmap.c
304
>= i_size_read(mapping->host)) {
fs/ecryptfs/mmap.c
324
if (prev_page_end_size > i_size_read(mapping->host)) {
fs/ecryptfs/mmap.c
338
if ((i_size_read(mapping->host) == prev_page_end_size)
fs/ecryptfs/mmap.c
36
while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
fs/ecryptfs/mmap.c
43
mapping_set_error(mapping, error);
fs/ecryptfs/mmap.c
441
struct address_space *mapping,
fs/ecryptfs/mmap.c
448
struct inode *ecryptfs_inode = mapping->host;
fs/ecryptfs/mmap.c
503
static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
fs/ecryptfs/mmap.c
505
struct inode *lower_inode = ecryptfs_inode_to_lower(mapping->host);
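
ecryptfs_writepages (mmap.c:30-43) shows the minimal modern writeback loop: writeback_iter() hands back dirty folios one at a time, and the first failure is latched on the mapping with mapping_set_error(). Sketched, with ex_write_folio() as a hypothetical per-folio writer that is expected to unlock the folio:

#include <linux/pagemap.h>
#include <linux/writeback.h>

static int ex_write_folio(struct folio *folio,
                          struct writeback_control *wbc); /* hypothetical */

static int ex_writepages(struct address_space *mapping,
                         struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error = 0;

        while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
                error = ex_write_folio(folio, wbc);
                if (error)
                        mapping_set_error(mapping, error);
        }
        return error;
}
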
fs/efs/inode.c
22
static sector_t _efs_bmap(struct address_space *mapping, sector_t block)
fs/efs/inode.c
24
return generic_block_bmap(mapping,block,efs_get_block);
fs/efs/symlink.c
19
struct inode *inode = folio->mapping->host;
fs/erofs/data.c
407
.realinode = erofs_real_inode(rac->mapping->host, &need_iput),
fs/erofs/data.c
41
folio = read_mapping_folio(buf->mapping, index, buf->file);
fs/erofs/data.c
417
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
fs/erofs/data.c
419
return iomap_bmap(mapping, block, &erofs_iomap_ops);
fs/erofs/data.c
62
buf->mapping = sbi->metabox_inode->i_mapping;
fs/erofs/data.c
68
buf->mapping = buf->file->f_mapping;
fs/erofs/data.c
70
buf->mapping = sbi->dif0.fscache->inode->i_mapping;
fs/erofs/data.c
72
buf->mapping = sb->s_bdev->bd_mapping;
fs/erofs/dir.c
61
buf.mapping = dir->i_mapping;
fs/erofs/fileio.c
175
struct inode *realinode = erofs_real_inode(rac->mapping->host, &need_iput);
fs/erofs/fscache.c
211
struct erofs_fscache *ctx = folio->mapping->host->i_private;
fs/erofs/fscache.c
216
req = erofs_fscache_req_alloc(folio->mapping,
fs/erofs/fscache.c
228
iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
fs/erofs/fscache.c
24
struct address_space *mapping; /* The mapping being accessed */
fs/erofs/fscache.c
243
struct address_space *mapping = req->mapping;
fs/erofs/fscache.c
244
struct inode *inode = mapping->host;
fs/erofs/fscache.c
269
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
fs/erofs/fscache.c
284
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
fs/erofs/fscache.c
304
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
fs/erofs/fscache.c
330
req = erofs_fscache_req_alloc(folio->mapping,
fs/erofs/fscache.c
349
req = erofs_fscache_req_alloc(rac->mapping,
fs/erofs/fscache.c
49
XA_STATE(xas, &req->mapping->i_pages, start_page);
fs/erofs/fscache.c
70
static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping,
fs/erofs/fscache.c
77
req->mapping = mapping;
fs/erofs/internal.h
205
struct address_space *mapping;
fs/erofs/namei.c
102
buf.mapping = dir->i_mapping;
fs/erofs/namei.c
174
buf.mapping = dir->i_mapping;
fs/erofs/xattr.c
508
buf.mapping = sbi->packed_inode->i_mapping;
fs/erofs/zdata.c
1103
return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page);
fs/erofs/zdata.c
113
return fo->mapping == MNGD_MAPPING(sbi);
fs/erofs/zdata.c
1495
struct address_space *mapping;
fs/erofs/zdata.c
1520
mapping = READ_ONCE(folio->mapping);
fs/erofs/zdata.c
1525
if (mapping && mapping != mc) {
fs/erofs/zdata.c
1533
if (likely(folio->mapping == mc)) {
fs/erofs/zdata.c
1883
struct inode *sharedinode = folio->mapping->host;
fs/erofs/zdata.c
1911
struct inode *sharedinode = rac->mapping->host;
fs/erofs/zdata.c
994
buf.mapping = packed_inode->i_mapping;
fs/exfat/file.c
596
struct address_space *mapping = inode->i_mapping;
fs/exfat/file.c
597
const struct address_space_operations *ops = mapping->a_ops;
fs/exfat/file.c
609
err = ops->write_begin(NULL, mapping, pos, len, &folio, NULL);
fs/exfat/file.c
616
err = ops->write_end(NULL, mapping, pos, len, len, folio, NULL);
fs/exfat/file.c
621
balance_dirty_pages_ratelimited(mapping);
fs/exfat/inode.c
399
struct address_space *mapping = rac->mapping;
fs/exfat/inode.c
400
struct inode *inode = mapping->host;
fs/exfat/inode.c
413
static int exfat_writepages(struct address_space *mapping,
fs/exfat/inode.c
416
if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
fs/exfat/inode.c
419
return mpage_writepages(mapping, wbc, exfat_get_block);
fs/exfat/inode.c
422
static void exfat_write_failed(struct address_space *mapping, loff_t to)
fs/exfat/inode.c
424
struct inode *inode = mapping->host;
fs/exfat/inode.c
434
struct address_space *mapping,
fs/exfat/inode.c
440
if (unlikely(exfat_forced_shutdown(mapping->host->i_sb)))
fs/exfat/inode.c
443
ret = block_write_begin(mapping, pos, len, foliop, exfat_get_block);
fs/exfat/inode.c
446
exfat_write_failed(mapping, pos+len);
fs/exfat/inode.c
452
struct address_space *mapping,
fs/exfat/inode.c
456
struct inode *inode = mapping->host;
fs/exfat/inode.c
460
err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/exfat/inode.c
462
exfat_write_failed(mapping, pos+len);
fs/exfat/inode.c
480
struct address_space *mapping = iocb->ki_filp->f_mapping;
fs/exfat/inode.c
481
struct inode *inode = mapping->host;
fs/exfat/inode.c
495
exfat_write_failed(mapping, size);
fs/exfat/inode.c
521
static sector_t exfat_aop_bmap(struct address_space *mapping, sector_t block)
fs/exfat/inode.c
526
down_read(&EXFAT_I(mapping->host)->truncate_lock);
fs/exfat/inode.c
527
blocknr = generic_block_bmap(mapping, block, exfat_get_block);
fs/exfat/inode.c
528
up_read(&EXFAT_I(mapping->host)->truncate_lock);
fs/ext2/dir.c
102
struct inode *dir = folio->mapping->host;
fs/ext2/dir.c
193
struct address_space *mapping = dir->i_mapping;
fs/ext2/dir.c
194
struct folio *folio = read_mapping_folio(mapping, n, NULL);
fs/ext2/dir.c
574
struct inode *inode = folio->mapping->host;
fs/ext2/dir.c
87
struct address_space *mapping = folio->mapping;
fs/ext2/dir.c
88
struct inode *dir = mapping->host;
fs/ext2/ext2.h
742
void ext2_write_failed(struct address_space *mapping, loff_t to);
fs/ext2/inode.c
59
void ext2_write_failed(struct address_space *mapping, loff_t to)
fs/ext2/inode.c
61
struct inode *inode = mapping->host;
fs/ext2/inode.c
928
ext2_write_begin(const struct kiocb *iocb, struct address_space *mapping,
fs/ext2/inode.c
933
ret = block_write_begin(mapping, pos, len, foliop, ext2_get_block);
fs/ext2/inode.c
935
ext2_write_failed(mapping, pos + len);
fs/ext2/inode.c
940
struct address_space *mapping,
fs/ext2/inode.c
946
ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/ext2/inode.c
948
ext2_write_failed(mapping, pos + len);
fs/ext2/inode.c
952
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
fs/ext2/inode.c
954
return generic_block_bmap(mapping,block,ext2_get_block);
fs/ext2/inode.c
958
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/ext2/inode.c
960
return mpage_writepages(mapping, wbc, ext2_get_block);
fs/ext2/inode.c
964
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/ext2/inode.c
966
struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);
fs/ext2/inode.c
968
return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
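
ext2_dax_writepages (inode.c:964-968) is the whole story for DAX writeback: there is no pagecache to write, so ->writepages just asks dax_writeback_mapping_range() to flush CPU caches for the dirty ranges. Sketched with an illustrative per-sb info struct in place of ext2's sbi:

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/writeback.h>

struct ex_sb_info {
        struct dax_device *s_daxdev;    /* illustrative field name */
};

static int ex_dax_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        struct ex_sb_info *sbi = mapping->host->i_sb->s_fs_info;

        return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
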
fs/ext4/ext4.h
3655
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
fs/ext4/ext4.h
3661
extern int ext4_generic_write_inline_data(struct address_space *mapping,
fs/ext4/ext4_jbd2.c
211
struct address_space *mapping = sb->s_bdev->bd_mapping;
fs/ext4/ext4_jbd2.c
221
if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) {
fs/ext4/ext4_jbd2.c
223
err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err);
fs/ext4/extents.c
4848
struct address_space *mapping = file->f_mapping;
fs/ext4/extents.c
4896
filemap_invalidate_lock(mapping);
fs/ext4/extents.c
4921
filemap_invalidate_unlock(mapping);
fs/ext4/extents.c
5520
struct address_space *mapping = inode->i_mapping;
fs/ext4/extents.c
5553
ret = filemap_write_and_wait_range(mapping, start, offset);
fs/ext4/extents.c
5555
ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
fs/ext4/extents.c
5621
struct address_space *mapping = inode->i_mapping;
fs/ext4/extents.c
5651
ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
fs/ext4/file.c
745
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/ext4/file.c
751
filemap_invalidate_lock_shared(mapping);
fs/ext4/file.c
756
filemap_invalidate_unlock_shared(mapping);
fs/ext4/file.c
761
filemap_invalidate_lock_shared(mapping);
fs/ext4/file.c
773
filemap_invalidate_unlock_shared(mapping);
fs/ext4/file.c
776
filemap_invalidate_unlock_shared(mapping);
fs/ext4/inline.c
24
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
fs/ext4/inline.c
572
static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
fs/ext4/inline.c
607
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
fs/ext4/inline.c
608
mapping_gfp_mask(mapping));
fs/ext4/inline.c
696
int ext4_generic_write_inline_data(struct address_space *mapping,
fs/ext4/inline.c
728
return ext4_convert_inline_data_to_extent(mapping, inode);
fs/ext4/inline.c
731
ret = ext4_da_convert_inline_data_to_extent(mapping, inode, fsdata);
fs/ext4/inline.c
738
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
fs/ext4/inline.c
739
mapping_gfp_mask(mapping));
fs/ext4/inline.c
783
int ext4_try_to_write_inline_data(struct address_space *mapping,
fs/ext4/inline.c
789
return ext4_convert_inline_data_to_extent(mapping, inode);
fs/ext4/inline.c
790
return ext4_generic_write_inline_data(mapping, inode, pos, len,
fs/ext4/inline.c
885
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
fs/ext4/inline.c
892
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
fs/ext4/inline.c
893
mapping_gfp_mask(mapping));
fs/ext4/inode.c
1147
struct inode *inode = folio->mapping->host;
fs/ext4/inode.c
1171
struct inode *inode = folio->mapping->host;
fs/ext4/inode.c
1284
struct address_space *mapping,
fs/ext4/inode.c
1288
struct inode *inode = mapping->host;
fs/ext4/inode.c
1310
ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
fs/ext4/inode.c
1326
folio = write_begin_get_folio(iocb, mapping, index, len);
fs/ext4/inode.c
1353
if (folio->mapping != mapping) {
fs/ext4/inode.c
1438
struct address_space *mapping,
fs/ext4/inode.c
1443
struct inode *inode = mapping->host;
fs/ext4/inode.c
1543
struct address_space *mapping,
fs/ext4/inode.c
1548
struct inode *inode = mapping->host;
fs/ext4/inode.c
1722
struct address_space *mapping = inode->i_mapping;
fs/ext4/inode.c
1747
nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
fs/ext4/inode.c
2561
struct inode *inode = folio->mapping->host;
fs/ext4/inode.c
2618
struct address_space *mapping = mpd->inode->i_mapping;
fs/ext4/inode.c
2642
nr_folios = filemap_get_folios_tag(mapping, &index, end,
fs/ext4/inode.c
2686
unlikely(folio->mapping != mapping)) {
fs/ext4/inode.c
2770
struct address_space *mapping = inode->i_mapping;
fs/ext4/inode.c
2772
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
fs/ext4/inode.c
2783
if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
fs/ext4/inode.c
2796
ret = ext4_emergency_state(mapping->host->i_sb);
fs/ext4/inode.c
2851
writeback_index = mapping->writeback_index;
fs/ext4/inode.c
2864
tag_pages_for_writeback(mapping, mpd->start_pos >> PAGE_SHIFT,
fs/ext4/inode.c
3004
mapping->writeback_index = mpd->start_pos >> PAGE_SHIFT;
fs/ext4/inode.c
3012
static int ext4_writepages(struct address_space *mapping,
fs/ext4/inode.c
3015
struct super_block *sb = mapping->host->i_sb;
fs/ext4/inode.c
3017
.inode = mapping->host,
fs/ext4/inode.c
3058
static int ext4_dax_writepages(struct address_space *mapping,
fs/ext4/inode.c
3063
struct inode *inode = mapping->host;
fs/ext4/inode.c
3073
ret = dax_writeback_mapping_range(mapping,
fs/ext4/inode.c
3116
struct address_space *mapping,
fs/ext4/inode.c
3123
struct inode *inode = mapping->host;
fs/ext4/inode.c
3133
return ext4_write_begin(iocb, mapping, pos,
fs/ext4/inode.c
3140
ret = ext4_generic_write_inline_data(mapping, inode, pos, len,
fs/ext4/inode.c
3149
folio = write_begin_get_folio(iocb, mapping, index, len);
fs/ext4/inode.c
3187
struct inode *inode = folio->mapping->host;
fs/ext4/inode.c
3202
static int ext4_da_do_write_end(struct address_space *mapping,
fs/ext4/inode.c
3206
struct inode *inode = mapping->host;
fs/ext4/inode.c
3273
struct address_space *mapping,
fs/ext4/inode.c
3277
struct inode *inode = mapping->host;
fs/ext4/inode.c
3281
return ext4_write_end(iocb, mapping, pos,
fs/ext4/inode.c
3295
return ext4_da_do_write_end(mapping, pos, len, copied, folio);
fs/ext4/inode.c
3355
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
fs/ext4/inode.c
3357
struct inode *inode = mapping->host;
fs/ext4/inode.c
3367
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
fs/ext4/inode.c
3375
filemap_write_and_wait(mapping);
fs/ext4/inode.c
3378
ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
fs/ext4/inode.c
3399
journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
fs/ext4/inode.c
3422
struct inode *inode = folio->mapping->host;
fs/ext4/inode.c
3911
static bool ext4_journalled_dirty_folio(struct address_space *mapping,
fs/ext4/inode.c
3917
return filemap_dirty_folio(mapping, folio);
fs/ext4/inode.c
3920
static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
fs/ext4/inode.c
3924
return block_dirty_folio(mapping, folio);
fs/ext4/inode.c
4016
struct address_space *mapping, loff_t from, loff_t length)
fs/ext4/inode.c
4020
struct inode *inode = mapping->host;
fs/ext4/inode.c
4025
folio = __filemap_get_folio(mapping, from >> PAGE_SHIFT,
fs/ext4/inode.c
4027
mapping_gfp_constraint(mapping, ~__GFP_FS));
fs/ext4/inode.c
4119
struct address_space *mapping, loff_t from, loff_t length)
fs/ext4/inode.c
4121
struct inode *inode = mapping->host;
fs/ext4/inode.c
4136
return __ext4_block_zero_page_range(handle, mapping, from, length);
fs/ext4/inode.c
4146
struct address_space *mapping, loff_t from)
fs/ext4/inode.c
4150
struct inode *inode = mapping->host;
fs/ext4/inode.c
4159
return ext4_block_zero_page_range(handle, mapping, from, length);
fs/ext4/inode.c
4166
struct address_space *mapping = inode->i_mapping;
fs/ext4/inode.c
4181
err = ext4_block_zero_page_range(handle, mapping,
fs/ext4/inode.c
4187
err = ext4_block_zero_page_range(handle, mapping,
fs/ext4/inode.c
4194
err = ext4_block_zero_page_range(handle, mapping,
fs/ext4/inode.c
4513
struct address_space *mapping = inode->i_mapping;
fs/ext4/inode.c
4557
ext4_block_truncate_page(handle, mapping, inode->i_size);
fs/ext4/inode.c
6612
if (folio->mapping != inode->i_mapping || folio_pos(folio) > size) {
fs/ext4/inode.c
6653
struct address_space *mapping = inode->i_mapping;
fs/ext4/inode.c
6663
filemap_invalidate_lock_shared(mapping);
fs/ext4/inode.c
6692
if (folio->mapping != mapping || folio_pos(folio) > size) {
fs/ext4/inode.c
6731
filemap_invalidate_unlock_shared(mapping);
fs/ext4/mballoc.c
1391
inode = folio->mapping->host;
fs/ext4/mballoc.c
1563
BUG_ON(folio->mapping != inode->i_mapping);
fs/ext4/mballoc.c
1581
BUG_ON(folio->mapping != inode->i_mapping);
fs/ext4/mballoc.c
1735
if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
fs/ext4/mballoc.c
1785
if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
fs/ext4/move_extent.c
119
struct inode *inode = folio->mapping->host;
fs/ext4/move_extent.c
64
struct address_space *mapping[2];
fs/ext4/move_extent.c
70
mapping[0] = inode1->i_mapping;
fs/ext4/move_extent.c
71
mapping[1] = inode2->i_mapping;
fs/ext4/move_extent.c
74
mapping[0] = inode2->i_mapping;
fs/ext4/move_extent.c
75
mapping[1] = inode1->i_mapping;
fs/ext4/move_extent.c
80
folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
fs/ext4/move_extent.c
81
mapping_gfp_mask(mapping[0]));
fs/ext4/move_extent.c
87
folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
fs/ext4/move_extent.c
88
mapping_gfp_mask(mapping[1]));
fs/ext4/page-io.c
121
mapping_set_error(folio->mapping, err);
fs/ext4/page-io.c
462
struct inode *inode = folio->mapping->host;
fs/ext4/readpage.c
397
struct inode *inode = folio->mapping->host;
fs/ext4/readpage.c
418
struct inode *inode = rac->mapping->host;
fs/ext4/super.c
552
struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
fs/ext4/super.c
569
while ((folio = writeback_iter(mapping, &wbc, folio, &error))) {
fs/ext4/truncate.h
14
struct address_space *mapping = inode->i_mapping;
fs/ext4/truncate.h
20
filemap_invalidate_lock(mapping);
fs/ext4/truncate.h
21
truncate_inode_pages(mapping, inode->i_size);
fs/ext4/truncate.h
23
filemap_invalidate_unlock(mapping);
fs/ext4/verity.c
70
struct address_space *mapping = inode->i_mapping;
fs/ext4/verity.c
71
const struct address_space_operations *aops = mapping->a_ops;
fs/ext4/verity.c
83
res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
fs/ext4/verity.c
89
res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
fs/f2fs/checkpoint.c
249
struct address_space *mapping = META_MAPPING(sbi);
fs/f2fs/checkpoint.c
252
folio = f2fs_grab_cache_folio(mapping, index, false);
fs/f2fs/checkpoint.c
266
struct address_space *mapping = META_MAPPING(sbi);
fs/f2fs/checkpoint.c
283
folio = f2fs_grab_cache_folio(mapping, index, false);
fs/f2fs/checkpoint.c
584
static int f2fs_write_meta_pages(struct address_space *mapping,
fs/f2fs/checkpoint.c
587
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
fs/f2fs/checkpoint.c
604
trace_f2fs_writepages(mapping->host, wbc, META);
fs/f2fs/checkpoint.c
613
trace_f2fs_writepages(mapping->host, wbc, META);
fs/f2fs/checkpoint.c
620
struct address_space *mapping = META_MAPPING(sbi);
fs/f2fs/checkpoint.c
632
while ((nr_folios = filemap_get_folios_tag(mapping, &index,
fs/f2fs/checkpoint.c
686
static bool f2fs_dirty_meta_folio(struct address_space *mapping,
fs/f2fs/checkpoint.c
693
if (filemap_dirty_folio(mapping, folio)) {
fs/f2fs/checkpoint.c
694
inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
fs/f2fs/compress.c
1090
struct address_space *mapping = cc->inode->i_mapping;
fs/f2fs/compress.c
1108
folio = f2fs_filemap_get_folio(mapping, start_idx + i,
fs/f2fs/compress.c
1141
folio = filemap_lock_folio(mapping, start_idx + i);
fs/f2fs/compress.c
1514
struct address_space *mapping = cc->inode->i_mapping;
fs/f2fs/compress.c
1515
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
fs/f2fs/compress.c
1546
if (folio->mapping != mapping) {
fs/f2fs/compress.c
1993
struct address_space *mapping = COMPRESS_MAPPING(sbi);
fs/f2fs/compress.c
1998
if (!mapping->nrpages)
fs/f2fs/compress.c
2006
nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
fs/f2fs/compress.c
2014
if (folio->mapping != mapping) {
fs/f2fs/compress.c
2024
generic_error_remove_folio(mapping, folio);
fs/f2fs/compress.c
598
folio->mapping = NULL;
fs/f2fs/compress.c
95
folio->mapping = inode->i_mapping;
fs/f2fs/data.c
1269
struct address_space *mapping = inode->i_mapping;
fs/f2fs/data.c
1274
folio = f2fs_grab_cache_folio(mapping, index, for_write);
fs/f2fs/data.c
1279
pgoff_t folio_index = mapping_align_index(mapping, index);
fs/f2fs/data.c
1282
invalidate_inode_pages2_range(mapping, folio_index,
fs/f2fs/data.c
1353
struct address_space *mapping = inode->i_mapping;
fs/f2fs/data.c
1356
folio = f2fs_filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
fs/f2fs/data.c
1387
struct address_space *mapping = inode->i_mapping;
fs/f2fs/data.c
1396
if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
fs/f2fs/data.c
1415
struct address_space *mapping = inode->i_mapping;
fs/f2fs/data.c
1420
folio = f2fs_grab_cache_folio(mapping, index, true);
fs/f2fs/data.c
2634
struct address_space *mapping = rac ? rac->mapping : folio->mapping;
fs/f2fs/data.c
2638
if (mapping_large_folio_support(mapping))
fs/f2fs/data.c
2740
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
2766
struct inode *inode = rac->mapping->host;
fs/f2fs/data.c
2929
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
3061
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
3094
mapping_set_error(folio->mapping, -EIO);
fs/f2fs/data.c
3220
static int f2fs_write_cache_pages(struct address_space *mapping,
fs/f2fs/data.c
3229
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
fs/f2fs/data.c
3233
struct inode *inode = mapping->host;
fs/f2fs/data.c
3272
if (get_dirty_pages(mapping->host) <=
fs/f2fs/data.c
3273
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
fs/f2fs/data.c
3274
set_inode_flag(mapping->host, FI_HOT_DATA);
fs/f2fs/data.c
3276
clear_inode_flag(mapping->host, FI_HOT_DATA);
fs/f2fs/data.c
3279
index = mapping->writeback_index; /* prev offset */
fs/f2fs/data.c
3291
tag_pages_for_writeback(mapping, index, end);
fs/f2fs/data.c
3296
nr_folios = filemap_get_folios_tag(mapping, &index, end,
fs/f2fs/data.c
3391
if (unlikely(folio->mapping != mapping)) {
fs/f2fs/data.c
3484
mapping->writeback_index = done_index;
fs/f2fs/data.c
3487
f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
fs/f2fs/data.c
3547
static int __f2fs_write_data_pages(struct address_space *mapping,
fs/f2fs/data.c
3551
struct inode *inode = mapping->host;
fs/f2fs/data.c
3575
trace_f2fs_writepages(mapping->host, wbc, DATA);
fs/f2fs/data.c
3595
ret = f2fs_write_cache_pages(mapping, wbc, io_type);
fs/f2fs/data.c
3624
trace_f2fs_writepages(mapping->host, wbc, DATA);
fs/f2fs/data.c
3628
static int f2fs_write_data_pages(struct address_space *mapping,
fs/f2fs/data.c
3631
struct inode *inode = mapping->host;
fs/f2fs/data.c
3633
return __f2fs_write_data_pages(mapping, wbc,
fs/f2fs/data.c
3662
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
378
mapping_set_error(folio->mapping, -EIO);
fs/f2fs/data.c
3809
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
3849
struct address_space *mapping,
fs/f2fs/data.c
3853
struct inode *inode = mapping->host;
fs/f2fs/data.c
3907
folio = f2fs_filemap_get_folio(mapping, index,
fs/f2fs/data.c
3909
mapping_gfp_mask(mapping));
fs/f2fs/data.c
3933
if (folio->mapping != mapping) {
fs/f2fs/data.c
3967
if (unlikely(folio->mapping != mapping)) {
fs/f2fs/data.c
3987
struct address_space *mapping,
fs/f2fs/data.c
3991
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
4043
struct inode *inode = folio->mapping->host;
fs/f2fs/data.c
4078
static bool f2fs_dirty_data_folio(struct address_space *mapping,
fs/f2fs/data.c
4081
struct inode *inode = mapping->host;
fs/f2fs/data.c
4089
if (filemap_dirty_folio(mapping, folio)) {
fs/f2fs/data.c
4126
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
fs/f2fs/data.c
4128
struct inode *inode = mapping->host;
fs/f2fs/data.c
4135
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
fs/f2fs/data.c
4136
filemap_write_and_wait(mapping);
fs/f2fs/data.c
4236
struct address_space *mapping = swap_file->f_mapping;
fs/f2fs/data.c
4237
struct inode *inode = mapping->host;
fs/f2fs/data.c
4422
struct address_space *mapping = folio->mapping;
fs/f2fs/data.c
4425
xa_lock_irqsave(&mapping->i_pages, flags);
fs/f2fs/data.c
4426
__xa_clear_mark(&mapping->i_pages, folio->index,
fs/f2fs/data.c
4428
xa_unlock_irqrestore(&mapping->i_pages, flags);
fs/f2fs/data.c
485
F2FS_I(fio->folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
fs/f2fs/data.c
58
struct address_space *mapping = folio->mapping;
fs/f2fs/data.c
611
if (inode && inode == target->mapping->host)
fs/f2fs/data.c
65
inode = mapping->host;
fs/f2fs/data.c
764
f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
fs/f2fs/data.c
81
struct address_space *mapping = folio->mapping;
fs/f2fs/data.c
83
if (mapping) {
fs/f2fs/data.c
84
struct inode *inode = mapping->host;
fs/f2fs/data.c
861
fio_folio->mapping->host,
fs/f2fs/data.c
961
f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
fs/f2fs/f2fs.h
2188
static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
fs/f2fs/f2fs.h
2190
return F2FS_I_SB(mapping->host);
fs/f2fs/f2fs.h
2195
return F2FS_M_SB(folio->mapping);
fs/f2fs/f2fs.h
2265
return folio->mapping == META_MAPPING(F2FS_F_SB(folio));
fs/f2fs/f2fs.h
2270
return folio->mapping == NODE_MAPPING(F2FS_F_SB(folio));
fs/f2fs/f2fs.h
3026
static inline struct folio *f2fs_grab_cache_folio(struct address_space *mapping,
fs/f2fs/f2fs.h
3039
folio = __filemap_get_folio(mapping, index, fgf_flags, 0);
fs/f2fs/f2fs.h
3043
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
fs/f2fs/f2fs.h
3048
return filemap_grab_folio(mapping, index);
fs/f2fs/f2fs.h
3051
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/f2fs/f2fs.h
3052
mapping_gfp_mask(mapping));
fs/f2fs/f2fs.h
3059
struct address_space *mapping, pgoff_t index,
fs/f2fs/f2fs.h
3062
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
fs/f2fs/f2fs.h
3065
return __filemap_get_folio(mapping, index, fgp_flags, gfp_mask);
fs/f2fs/f2fs.h
4067
return fio->folio->mapping->host;
fs/f2fs/file.c
133
if (unlikely(folio->mapping != inode->i_mapping ||
fs/f2fs/file.c
1678
struct address_space *mapping = inode->i_mapping;
fs/f2fs/file.c
1692
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
fs/f2fs/file.c
1729
filemap_invalidate_lock(mapping);
fs/f2fs/file.c
1741
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
1753
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
1788
struct address_space *mapping = inode->i_mapping;
fs/f2fs/file.c
1811
filemap_invalidate_lock(mapping);
fs/f2fs/file.c
1813
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
1818
ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
fs/f2fs/file.c
1829
filemap_invalidate_lock(mapping);
fs/f2fs/file.c
1849
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
1855
filemap_invalidate_lock(mapping);
fs/f2fs/file.c
1856
ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
fs/f2fs/file.c
1858
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
4178
struct address_space *mapping = inode->i_mapping;
fs/f2fs/file.c
422
static bool __found_offset(struct address_space *mapping,
fs/f2fs/file.c
4240
filemap_invalidate_lock(mapping);
fs/f2fs/file.c
4242
ret = filemap_write_and_wait_range(mapping, range.start,
fs/f2fs/file.c
4247
truncate_inode_pages_range(mapping, range.start,
fs/f2fs/file.c
426
struct inode *inode = mapping->host;
fs/f2fs/file.c
4328
filemap_invalidate_unlock(mapping);
fs/f2fs/file.c
441
xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
fs/f2fs/file.c
4436
struct address_space *mapping = inode->i_mapping;
fs/f2fs/file.c
4441
filemap_invalidate_lock_shared(mapping);
fs/f2fs/file.c
4443
filemap_invalidate_unlock_shared(mapping);
fs/f2fs/file.c
4446
folio = read_cache_folio(mapping, page_idx, NULL, NULL);
fs/f2fs/file.c
4456
folio = filemap_lock_folio(mapping, redirty_idx);
fs/f2fs/file.c
5075
static void f2fs_flush_buffered_write(struct address_space *mapping,
fs/f2fs/file.c
5080
ret = filemap_write_and_wait_range(mapping, start_pos, end_pos);
fs/f2fs/file.c
5083
invalidate_mapping_pages(mapping,
fs/f2fs/file.c
5304
struct address_space *mapping;
fs/f2fs/file.c
5315
mapping = filp->f_mapping;
fs/f2fs/file.c
5316
if (!mapping || len < 0)
fs/f2fs/file.c
5319
bdi = inode_to_bdi(mapping->host);
fs/f2fs/file.c
744
struct address_space *mapping = inode->i_mapping;
fs/f2fs/file.c
751
folio = filemap_lock_folio(mapping, index);
fs/f2fs/gc.c
1219
struct address_space *mapping = f2fs_is_cow_file(inode) ?
fs/f2fs/gc.c
1235
folio = f2fs_grab_cache_folio(mapping, index, true);
fs/f2fs/gc.c
1310
struct address_space *mapping = f2fs_is_cow_file(inode) ?
fs/f2fs/gc.c
1334
folio = f2fs_grab_cache_folio(mapping, bidx, false);
fs/f2fs/inline.c
84
struct inode *inode = folio->mapping->host;
fs/f2fs/node.c
2240
static int f2fs_write_node_pages(struct address_space *mapping,
fs/f2fs/node.c
2243
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
fs/f2fs/node.c
2268
trace_f2fs_writepages(mapping->host, wbc, NODE);
fs/f2fs/node.c
2282
trace_f2fs_writepages(mapping->host, wbc, NODE);
fs/f2fs/node.c
2286
static bool f2fs_dirty_node_folio(struct address_space *mapping,
fs/f2fs/node.c
2295
f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
fs/f2fs/node.c
2297
if (filemap_dirty_folio(mapping, folio)) {
fs/f2fs/node.c
2298
inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
fs/f2fs/segment.c
3980
if (fscrypt_inode_uses_fs_layer_crypto(folio->mapping->host))
fs/f2fs/super.c
3120
struct address_space *mapping = inode->i_mapping;
fs/f2fs/super.c
3136
folio = mapping_read_folio_gfp(mapping, off >> PAGE_SHIFT,
fs/f2fs/super.c
3151
if (unlikely(folio->mapping != mapping)) {
fs/f2fs/super.c
3177
struct address_space *mapping = inode->i_mapping;
fs/f2fs/super.c
3178
const struct address_space_operations *a_ops = mapping->a_ops;
fs/f2fs/super.c
3190
err = a_ops->write_begin(NULL, mapping, off, tocopy,
fs/f2fs/super.c
3203
a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
fs/f2fs/super.c
3344
struct address_space *mapping = dqopt->files[type]->i_mapping;
fs/f2fs/super.c
3351
ret = filemap_fdatawrite(mapping);
fs/f2fs/super.c
3359
ret = filemap_fdatawait(mapping);
fs/f2fs/verity.c
74
struct address_space *mapping = inode->i_mapping;
fs/f2fs/verity.c
75
const struct address_space_operations *aops = mapping->a_ops;
fs/f2fs/verity.c
87
res = aops->write_begin(NULL, mapping, pos, n, &folio, &fsdata);
fs/f2fs/verity.c
93
res = aops->write_end(NULL, mapping, pos, n, n, folio, fsdata);
fs/fat/file.c
220
struct address_space *mapping = inode->i_mapping;
fs/fat/file.c
237
err = filemap_fdatawrite_range(mapping, start,
fs/fat/file.c
239
err2 = sync_mapping_buffers(mapping);
fs/fat/file.c
246
err = filemap_fdatawait_range(mapping, start,
fs/fat/inode.c
197
static int fat_writepages(struct address_space *mapping,
fs/fat/inode.c
200
return mpage_writepages(mapping, wbc, fat_get_block);
fs/fat/inode.c
213
static void fat_write_failed(struct address_space *mapping, loff_t to)
fs/fat/inode.c
215
struct inode *inode = mapping->host;
fs/fat/inode.c
224
struct address_space *mapping,
fs/fat/inode.c
230
err = cont_write_begin(iocb, mapping, pos, len,
fs/fat/inode.c
232
&MSDOS_I(mapping->host)->mmu_private);
fs/fat/inode.c
234
fat_write_failed(mapping, pos + len);
fs/fat/inode.c
239
struct address_space *mapping,
fs/fat/inode.c
243
struct inode *inode = mapping->host;
fs/fat/inode.c
245
err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/fat/inode.c
247
fat_write_failed(mapping, pos + len);
fs/fat/inode.c
259
struct address_space *mapping = file->f_mapping;
fs/fat/inode.c
260
struct inode *inode = mapping->host;
fs/fat/inode.c
286
fat_write_failed(mapping, offset + count);
fs/fat/inode.c
316
static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
fs/fat/inode.c
321
down_read(&MSDOS_I(mapping->host)->truncate_lock);
fs/fat/inode.c
322
blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap);
fs/fat/inode.c
323
up_read(&MSDOS_I(mapping->host)->truncate_lock);
fs/freevxfs/vxfs_immed.c
33
struct vxfs_inode_info *vip = VXFS_INO(folio->mapping->host);
fs/freevxfs/vxfs_subr.c
149
vxfs_bmap(struct address_space *mapping, sector_t block)
fs/freevxfs/vxfs_subr.c
151
return generic_block_bmap(mapping, block, vxfs_getblk);
fs/freevxfs/vxfs_subr.c
44
vxfs_get_page(struct address_space *mapping, u_long n)
fs/freevxfs/vxfs_subr.c
48
pp = read_mapping_page(mapping, n, NULL);
fs/fs-writeback.c
1753
struct address_space *mapping = inode->i_mapping;
fs/fs-writeback.c
1762
ret = do_writepages(mapping, wbc);
fs/fs-writeback.c
1772
int err = filemap_fdatawait(mapping);
fs/fs-writeback.c
1811
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
fs/fs-writeback.c
2776
struct address_space *mapping = inode->i_mapping;
fs/fs-writeback.c
2791
if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
fs/fs-writeback.c
2812
filemap_fdatawait_keep_errors(mapping);
fs/fs-writeback.c
410
struct address_space *mapping = inode->i_mapping;
fs/fs-writeback.c
411
XA_STATE(xas, &mapping->i_pages, 0);
fs/fs-writeback.c
416
xa_lock_irq(&mapping->i_pages);
fs/fs-writeback.c
448
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
fs/fs-writeback.c
496
xa_unlock_irq(&mapping->i_pages);
fs/fuse/dev.c
1764
struct address_space *mapping;
fs/fuse/dev.c
1792
mapping = inode->i_mapping;
fs/fuse/dev.c
1809
folio = filemap_grab_folio(mapping, index);
fs/fuse/dev.c
1863
struct address_space *mapping = inode->i_mapping;
fs/fuse/dev.c
1914
folio = filemap_get_folio(mapping, index);
fs/fuse/dev.c
957
folio->mapping != NULL ||
fs/fuse/dir.c
2139
struct address_space *mapping = inode->i_mapping;
fs/fuse/dir.c
2165
filemap_invalidate_lock(mapping);
fs/fuse/dir.c
2169
filemap_invalidate_unlock(mapping);
fs/fuse/dir.c
2290
invalidate_inode_pages2(mapping);
fs/fuse/dir.c
2296
filemap_invalidate_unlock(mapping);
fs/fuse/dir.c
2307
filemap_invalidate_unlock(mapping);
fs/fuse/dir.c
2474
int err = fuse_readlink_folio(folio->mapping->host, folio);
fs/fuse/file.c
1005
struct address_space *mapping;
fs/fuse/file.c
1009
mapping = ap->folios[0]->mapping;
fs/fuse/file.c
1010
inode = mapping->host;
fs/fuse/file.c
1069
struct inode *inode = rac->mapping->host;
fs/fuse/file.c
1239
struct address_space *mapping,
fs/fuse/file.c
1244
struct fuse_conn *fc = get_fuse_conn(mapping->host);
fs/fuse/file.c
1262
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/fuse/file.c
1263
mapping_gfp_mask(mapping));
fs/fuse/file.c
1269
if (mapping_writably_mapped(mapping))
fs/fuse/file.c
1334
struct address_space *mapping = iocb->ki_filp->f_mapping;
fs/fuse/file.c
1335
struct inode *inode = mapping->host;
fs/fuse/file.c
1358
count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
fs/fuse/file.c
1475
struct address_space *mapping = file->f_mapping;
fs/fuse/file.c
1477
struct inode *inode = mapping->host;
fs/fuse/file.c
1484
err = fuse_update_attributes(mapping->host, file,
fs/fuse/file.c
1647
struct address_space *mapping = file->f_mapping;
fs/fuse/file.c
1648
struct inode *inode = mapping->host;
fs/fuse/file.c
1668
res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
fs/fuse/file.c
1674
if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) {
fs/fuse/file.c
1683
res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
fs/fuse/file.c
1747
invalidate_inode_pages2_range(mapping, idx_from, idx_to);
fs/fuse/file.c
2090
struct inode *inode = folio->mapping->host;
fs/fuse/file.c
2274
static int fuse_writepages(struct address_space *mapping,
fs/fuse/file.c
2277
struct inode *inode = mapping->host;
fs/fuse/file.c
2303
.inode = folio->mapping->host,
fs/fuse/file.c
2352
if (folio->mapping != inode->i_mapping) {
fs/fuse/file.c
2573
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
fs/fuse/file.c
2575
struct inode *inode = mapping->host;
fs/fuse/file.c
813
struct inode *inode = folio->mapping->host;
fs/fuse/file.c
966
struct inode *inode = folio->mapping->host;
fs/gfs2/aops.c
104
struct inode *inode = folio->mapping->host;
fs/gfs2/aops.c
126
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
fs/gfs2/aops.c
128
struct inode *inode = mapping->host;
fs/gfs2/aops.c
130
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
fs/gfs2/aops.c
138
while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
fs/gfs2/aops.c
157
static int gfs2_writepages(struct address_space *mapping,
fs/gfs2/aops.c
160
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
fs/gfs2/aops.c
162
.inode = mapping->host,
fs/gfs2/aops.c
190
static int gfs2_write_jdata_batch(struct address_space *mapping,
fs/gfs2/aops.c
195
struct inode *inode = mapping->host;
fs/gfs2/aops.c
218
if (unlikely(folio->mapping != mapping)) {
fs/gfs2/aops.c
282
static int gfs2_write_cache_jdata(struct address_space *mapping,
fs/gfs2/aops.c
299
writeback_index = mapping->writeback_index; /* prev offset */
fs/gfs2/aops.c
317
tag_pages_for_writeback(mapping, index, end);
fs/gfs2/aops.c
320
nr_folios = filemap_get_folios_tag(mapping, &index, end,
fs/gfs2/aops.c
325
ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
fs/gfs2/aops.c
348
mapping->writeback_index = done_index;
fs/gfs2/aops.c
361
static int gfs2_jdata_writepages(struct address_space *mapping,
fs/gfs2/aops.c
364
struct gfs2_inode *ip = GFS2_I(mapping->host);
fs/gfs2/aops.c
365
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
fs/gfs2/aops.c
368
ret = gfs2_write_cache_jdata(mapping, wbc);
fs/gfs2/aops.c
372
ret = gfs2_write_cache_jdata(mapping, wbc);
fs/gfs2/aops.c
420
struct inode *inode = folio->mapping->host;
fs/gfs2/aops.c
452
struct address_space *mapping = ip->i_inode.i_mapping;
fs/gfs2/aops.c
460
folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
fs/gfs2/aops.c
494
struct inode *inode = rac->mapping->host;
fs/gfs2/aops.c
545
static bool gfs2_jdata_dirty_folio(struct address_space *mapping,
fs/gfs2/aops.c
550
return block_dirty_folio(mapping, folio);
fs/gfs2/aops.c
561
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
fs/gfs2/aops.c
563
struct gfs2_inode *ip = GFS2_I(mapping->host);
fs/gfs2/aops.c
573
dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);
fs/gfs2/aops.c
608
struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
fs/gfs2/aops.c
649
struct address_space *mapping = folio->mapping;
fs/gfs2/aops.c
650
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
fs/gfs2/aops.c
74
struct inode * const inode = folio->mapping->host;
fs/gfs2/aops.h
12
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc);
fs/gfs2/file.c
1127
struct address_space *mapping = file->f_mapping;
fs/gfs2/file.c
1156
invalidate_mapping_pages(mapping,
fs/gfs2/file.c
396
if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
fs/gfs2/file.c
470
folio->mapping != inode->i_mapping) {
fs/gfs2/file.c
522
if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
fs/gfs2/file.c
745
struct address_space *mapping = file->f_mapping;
fs/gfs2/file.c
746
struct inode *inode = mapping->host;
fs/gfs2/file.c
751
if (mapping->nrpages) {
fs/gfs2/file.c
752
ret1 = filemap_fdatawrite_range(mapping, start, end);
fs/gfs2/file.c
773
if (mapping->nrpages)
fs/gfs2/glock.c
1088
struct address_space *mapping;
fs/gfs2/glock.c
1143
mapping = gfs2_glock2aspace(gl);
fs/gfs2/glock.c
1144
if (mapping) {
fs/gfs2/glock.c
1147
mapping->a_ops = &gfs2_meta_aops;
fs/gfs2/glock.c
1148
mapping->host = sdp->sd_inode;
fs/gfs2/glock.c
1149
mapping->flags = 0;
fs/gfs2/glock.c
1151
mapping_set_gfp_mask(mapping, gfp_mask);
fs/gfs2/glock.c
1152
mapping->i_private_data = NULL;
fs/gfs2/glock.c
1153
mapping->writeback_index = 0;
fs/gfs2/glock.c
2328
struct address_space *mapping = gfs2_glock2aspace(gl);
fs/gfs2/glock.c
2330
nrpages = mapping->nrpages;
fs/gfs2/glock.c
239
struct address_space *mapping = gfs2_glock2aspace(gl);
fs/gfs2/glock.c
245
if (mapping) {
fs/gfs2/glock.c
246
truncate_inode_pages_final(mapping);
fs/gfs2/glock.c
248
GLOCK_BUG_ON(gl, !mapping_empty(mapping));
fs/gfs2/glock.h
150
struct address_space mapping;
fs/gfs2/glock.h
179
return &gla->mapping;
fs/gfs2/glops.c
224
struct address_space *mapping = gfs2_aspace(sdp);
fs/gfs2/glops.c
236
truncate_inode_pages_range(mapping, start, end);
fs/gfs2/glops.c
324
struct address_space *mapping = ip->i_inode.i_mapping;
fs/gfs2/glops.c
325
filemap_fdatawrite(mapping);
fs/gfs2/glops.c
326
error = filemap_fdatawait(mapping);
fs/gfs2/glops.c
327
mapping_set_error(mapping, error);
fs/gfs2/glops.c
365
struct address_space *mapping = gfs2_glock2aspace(gl);
fs/gfs2/glops.c
366
truncate_inode_pages(mapping, 0);
fs/gfs2/glops.c
41
bh->b_folio->mapping, bh->b_folio->flags.f);
fs/gfs2/log.c
129
mapping = bh->b_folio->mapping;
fs/gfs2/log.c
130
if (!mapping)
fs/gfs2/log.c
133
BUG_ON(GFS2_SB(mapping->host) != sdp);
fs/gfs2/log.c
134
if (gfs2_is_jdata(GFS2_I(mapping->host)))
fs/gfs2/log.c
135
ret = gfs2_jdata_writeback(mapping, wbc);
fs/gfs2/log.c
137
ret = mapping->a_ops->writepages(mapping, wbc);
fs/gfs2/log.c
146
mapping_set_error(mapping, ret);
fs/gfs2/log.c
99
struct address_space *mapping;
fs/gfs2/lops.c
401
filemap_set_wb_err(fi.folio->mapping, error);
fs/gfs2/lops.c
507
struct address_space *mapping = jd->jd_inode->i_mapping;
fs/gfs2/lops.c
524
since = filemap_sample_wb_err(mapping);
fs/gfs2/lops.c
530
folio = filemap_grab_folio(mapping,
fs/gfs2/lops.c
591
ret = filemap_check_wb_err(mapping, since);
fs/gfs2/lops.c
593
truncate_inode_pages(mapping, 0);
fs/gfs2/main.c
66
address_space_init_once(&gla->mapping);
fs/gfs2/meta_io.c
128
struct address_space *mapping = gfs2_glock2aspace(gl);
fs/gfs2/meta_io.c
136
if (mapping == NULL)
fs/gfs2/meta_io.c
137
mapping = gfs2_aspace(sdp);
fs/gfs2/meta_io.c
144
folio = __filemap_get_folio(mapping, index,
fs/gfs2/meta_io.c
146
mapping_gfp_mask(mapping) | __GFP_NOFAIL);
fs/gfs2/meta_io.c
152
folio = __filemap_get_folio(mapping, index,
fs/gfs2/meta_io.c
343
struct address_space *mapping = bh->b_folio->mapping;
fs/gfs2/meta_io.c
344
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
fs/gfs2/meta_io.c
412
struct address_space *mapping = ip->i_inode.i_mapping;
fs/gfs2/meta_io.c
420
folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED, 0);
fs/gfs2/meta_io.c
89
static int gfs2_aspace_writepages(struct address_space *mapping,
fs/gfs2/meta_io.c
95
while ((folio = writeback_iter(mapping, wbc, folio, &error)))
fs/gfs2/meta_io.h
40
static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
fs/gfs2/meta_io.h
42
struct inode *inode = mapping->host;
fs/gfs2/meta_io.h
43
if (mapping->a_ops == &gfs2_meta_aops) {
fs/gfs2/meta_io.h
45
container_of(mapping, struct gfs2_glock_aspace, mapping);
fs/gfs2/ops_fstype.c
1118
struct address_space *mapping;
fs/gfs2/ops_fstype.c
1177
mapping = gfs2_aspace(sdp);
fs/gfs2/ops_fstype.c
1178
mapping->a_ops = &gfs2_rgrp_aops;
fs/gfs2/quota.c
731
struct address_space *mapping = inode->i_mapping;
fs/gfs2/quota.c
741
folio = filemap_grab_folio(mapping, index);
fs/hfs/bnode.c
345
struct address_space *mapping;
fs/hfs/bnode.c
383
mapping = tree->inode->i_mapping;
fs/hfs/bnode.c
388
page = read_mapping_page(mapping, block++, NULL);
fs/hfs/btree.c
23
struct address_space *mapping;
fs/hfs/btree.c
81
mapping = tree->inode->i_mapping;
fs/hfs/btree.c
82
folio = filemap_grab_folio(mapping, 0);
fs/hfs/extent.c
489
struct address_space *mapping = inode->i_mapping;
fs/hfs/extent.c
495
res = hfs_write_begin(NULL, mapping, size + 1, 0, &folio,
fs/hfs/extent.c
498
res = generic_write_end(NULL, mapping, size + 1, 0, 0,
fs/hfs/hfs_fs.h
180
int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
fs/hfs/inode.c
128
struct address_space *mapping = file->f_mapping;
fs/hfs/inode.c
129
struct inode *inode = mapping->host;
fs/hfs/inode.c
144
hfs_write_failed(mapping, end);
fs/hfs/inode.c
150
static int hfs_writepages(struct address_space *mapping,
fs/hfs/inode.c
153
return mpage_writepages(mapping, wbc, hfs_get_block);
fs/hfs/inode.c
37
static void hfs_write_failed(struct address_space *mapping, loff_t to)
fs/hfs/inode.c
39
struct inode *inode = mapping->host;
fs/hfs/inode.c
47
int hfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
fs/hfs/inode.c
53
ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
fs/hfs/inode.c
55
&HFS_I(mapping->host)->phys_size);
fs/hfs/inode.c
57
hfs_write_failed(mapping, pos + len);
fs/hfs/inode.c
62
static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
fs/hfs/inode.c
64
return generic_block_bmap(mapping, block, hfs_get_block);
fs/hfs/inode.c
69
struct inode *inode = folio->mapping->host;
fs/hfsplus/bitmap.c
132
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
fs/hfsplus/bitmap.c
168
struct address_space *mapping;
fs/hfsplus/bitmap.c
183
mapping = sbi->alloc_file->i_mapping;
fs/hfsplus/bitmap.c
185
page = read_mapping_page(mapping, pnr, NULL);
fs/hfsplus/bitmap.c
219
page = read_mapping_page(mapping, ++pnr, NULL);
fs/hfsplus/bitmap.c
24
struct address_space *mapping;
fs/hfsplus/bitmap.c
36
mapping = sbi->alloc_file->i_mapping;
fs/hfsplus/bitmap.c
37
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
fs/hfsplus/bitmap.c
81
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
fs/hfsplus/bnode.c
453
struct address_space *mapping;
fs/hfsplus/bnode.c
493
mapping = tree->inode->i_mapping;
fs/hfsplus/bnode.c
498
page = read_mapping_page(mapping, block, NULL);
fs/hfsplus/btree.c
137
struct address_space *mapping;
fs/hfsplus/btree.c
160
mapping = tree->inode->i_mapping;
fs/hfsplus/btree.c
161
page = read_mapping_page(mapping, 0, NULL);
fs/hfsplus/extents.c
552
struct address_space *mapping = inode->i_mapping;
fs/hfsplus/extents.c
557
res = hfsplus_write_begin(NULL, mapping, size, 0,
fs/hfsplus/extents.c
561
res = generic_write_end(NULL, mapping, size, 0, 0,
fs/hfsplus/hfsplus_fs.h
446
struct address_space *mapping,
fs/hfsplus/inode.c
126
struct address_space *mapping = file->f_mapping;
fs/hfsplus/inode.c
127
struct inode *inode = mapping->host;
fs/hfsplus/inode.c
142
hfsplus_write_failed(mapping, end);
fs/hfsplus/inode.c
148
static int hfsplus_writepages(struct address_space *mapping,
fs/hfsplus/inode.c
151
return mpage_writepages(mapping, wbc, hfsplus_get_block);
fs/hfsplus/inode.c
31
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
fs/hfsplus/inode.c
33
struct inode *inode = mapping->host;
fs/hfsplus/inode.c
42
struct address_space *mapping, loff_t pos,
fs/hfsplus/inode.c
48
ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
fs/hfsplus/inode.c
50
&HFSPLUS_I(mapping->host)->phys_size);
fs/hfsplus/inode.c
52
hfsplus_write_failed(mapping, pos + len);
fs/hfsplus/inode.c
57
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
fs/hfsplus/inode.c
59
return generic_block_bmap(mapping, block, hfsplus_get_block);
fs/hfsplus/inode.c
64
struct inode *inode = folio->mapping->host;
fs/hfsplus/xattr.c
131
struct address_space *mapping;
fs/hfsplus/xattr.c
217
mapping = attr_file->i_mapping;
fs/hfsplus/xattr.c
224
page = read_mapping_page(mapping, index, NULL);
fs/hostfs/hostfs_kern.c
399
static int hostfs_writepages(struct address_space *mapping,
fs/hostfs/hostfs_kern.c
402
struct inode *inode = mapping->host;
fs/hostfs/hostfs_kern.c
407
while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
fs/hostfs/hostfs_kern.c
422
mapping_set_error(mapping, err);
fs/hostfs/hostfs_kern.c
449
struct address_space *mapping,
fs/hostfs/hostfs_kern.c
455
*foliop = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/hostfs/hostfs_kern.c
456
mapping_gfp_mask(mapping));
fs/hostfs/hostfs_kern.c
463
struct address_space *mapping,
fs/hostfs/hostfs_kern.c
467
struct inode *inode = mapping->host;
fs/hpfs/file.c
173
static int hpfs_writepages(struct address_space *mapping,
fs/hpfs/file.c
176
return mpage_writepages(mapping, wbc, hpfs_get_block);
fs/hpfs/file.c
179
static void hpfs_write_failed(struct address_space *mapping, loff_t to)
fs/hpfs/file.c
181
struct inode *inode = mapping->host;
fs/hpfs/file.c
194
struct address_space *mapping,
fs/hpfs/file.c
200
ret = cont_write_begin(iocb, mapping, pos, len, foliop, fsdata,
fs/hpfs/file.c
202
&hpfs_i(mapping->host)->mmu_private);
fs/hpfs/file.c
204
hpfs_write_failed(mapping, pos + len);
fs/hpfs/file.c
210
struct address_space *mapping,
fs/hpfs/file.c
214
struct inode *inode = mapping->host;
fs/hpfs/file.c
216
err = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/hpfs/file.c
218
hpfs_write_failed(mapping, pos + len);
fs/hpfs/file.c
228
static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
fs/hpfs/file.c
230
return generic_block_bmap(mapping, block, hpfs_get_block);
fs/hpfs/namei.c
482
struct inode *i = folio->mapping->host;
fs/hugetlbfs/inode.c
1055
static int hugetlbfs_migrate_folio(struct address_space *mapping,
fs/hugetlbfs/inode.c
1061
rc = migrate_huge_page_move_mapping(mapping, dst, src);
fs/hugetlbfs/inode.c
1079
static int hugetlbfs_error_remove_folio(struct address_space *mapping,
fs/hugetlbfs/inode.c
243
struct address_space *mapping = file->f_mapping;
fs/hugetlbfs/inode.c
244
struct inode *inode = mapping->host;
fs/hugetlbfs/inode.c
271
folio = filemap_lock_hugetlb_folio(h, mapping, index);
fs/hugetlbfs/inode.c
318
struct address_space *mapping,
fs/hugetlbfs/inode.c
326
struct address_space *mapping,
fs/hugetlbfs/inode.c
400
struct address_space *mapping,
fs/hugetlbfs/inode.c
403
struct rb_root_cached *root = &mapping->i_mmap;
fs/hugetlbfs/inode.c
414
i_mmap_lock_write(mapping);
fs/hugetlbfs/inode.c
442
i_mmap_unlock_write(mapping);
fs/hugetlbfs/inode.c
451
i_mmap_lock_write(mapping);
fs/hugetlbfs/inode.c
519
struct address_space *mapping,
fs/hugetlbfs/inode.c
535
hugetlb_unmap_file_folio(h, mapping, folio, index);
fs/hugetlbfs/inode.c
581
struct address_space *mapping = &inode->i_data;
fs/hugetlbfs/inode.c
590
while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
fs/hugetlbfs/inode.c
596
hash = hugetlb_fault_mutex_hash(mapping, index);
fs/hugetlbfs/inode.c
602
if (remove_inode_single_folio(h, inode, mapping, folio,
fs/hugetlbfs/inode.c
641
struct address_space *mapping = inode->i_mapping;
fs/hugetlbfs/inode.c
648
i_mmap_lock_write(mapping);
fs/hugetlbfs/inode.c
649
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
fs/hugetlbfs/inode.c
650
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
fs/hugetlbfs/inode.c
652
i_mmap_unlock_write(mapping);
fs/hugetlbfs/inode.c
657
struct address_space *mapping,
fs/hugetlbfs/inode.c
664
folio = filemap_lock_hugetlb_folio(h, mapping, idx);
fs/hugetlbfs/inode.c
682
struct address_space *mapping = inode->i_mapping;
fs/hugetlbfs/inode.c
701
i_mmap_lock_write(mapping);
fs/hugetlbfs/inode.c
705
hugetlbfs_zero_partial_page(h, mapping,
fs/hugetlbfs/inode.c
710
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
fs/hugetlbfs/inode.c
711
hugetlb_vmdelete_list(&mapping->i_mmap,
fs/hugetlbfs/inode.c
718
hugetlbfs_zero_partial_page(h, mapping,
fs/hugetlbfs/inode.c
721
i_mmap_unlock_write(mapping);
fs/hugetlbfs/inode.c
737
struct address_space *mapping = inode->i_mapping;
fs/hugetlbfs/inode.c
806
hash = hugetlb_fault_mutex_hash(mapping, index);
fs/hugetlbfs/inode.c
810
folio = filemap_get_folio(mapping, index << huge_page_order(h));
fs/hugetlbfs/inode.c
833
error = hugetlb_add_to_page_cache(folio, mapping, index);
fs/inode.c
232
struct address_space *const mapping = &inode->i_data;
fs/inode.c
278
mapping->a_ops = &empty_aops;
fs/inode.c
279
mapping->host = inode;
fs/inode.c
280
mapping->flags = 0;
fs/inode.c
281
mapping->wb_err = 0;
fs/inode.c
282
atomic_set(&mapping->i_mmap_writable, 0);
fs/inode.c
284
atomic_set(&mapping->nr_thps, 0);
fs/inode.c
286
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
fs/inode.c
287
mapping->i_private_data = NULL;
fs/inode.c
288
mapping->writeback_index = 0;
fs/inode.c
289
init_rwsem(&mapping->invalidate_lock);
fs/inode.c
290
lockdep_set_class_and_name(&mapping->invalidate_lock,
fs/inode.c
294
mapping_set_stable_writes(mapping);
fs/inode.c
296
inode->i_mapping = mapping;
fs/inode.c
483
static void __address_space_init_once(struct address_space *mapping)
fs/inode.c
485
xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
fs/inode.c
486
init_rwsem(&mapping->i_mmap_rwsem);
fs/inode.c
487
INIT_LIST_HEAD(&mapping->i_private_list);
fs/inode.c
488
spin_lock_init(&mapping->i_private_lock);
fs/inode.c
489
mapping->i_mmap = RB_ROOT_CACHED;
fs/inode.c
492
void address_space_init_once(struct address_space *mapping)
fs/inode.c
494
memset(mapping, 0, sizeof(*mapping));
fs/inode.c
495
__address_space_init_once(mapping);
fs/inode.c
721
void dump_mapping(const struct address_space *mapping)
fs/inode.c
735
if (get_kernel_nofault(host, &mapping->host) ||
fs/inode.c
736
get_kernel_nofault(a_ops, &mapping->a_ops)) {
fs/inode.c
737
pr_warn("invalid mapping:%px\n", mapping);
fs/iomap/bio.c
92
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
fs/iomap/buffered-io.c
1104
struct address_space *mapping = iter->inode->i_mapping;
fs/iomap/buffered-io.c
1105
size_t chunk = mapping_max_folio_size(mapping);
fs/iomap/buffered-io.c
1121
status = balance_dirty_pages_ratelimited_flags(mapping,
fs/iomap/buffered-io.c
115
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
1155
if (mapping_writably_mapped(mapping))
fs/iomap/buffered-io.c
130
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
140
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
1528
struct address_space *mapping = i->inode->i_mapping;
fs/iomap/buffered-io.c
1532
return filemap_write_and_wait_range(mapping, i->pos, end);
fs/iomap/buffered-io.c
1603
struct address_space *mapping = iter->inode->i_mapping;
fs/iomap/buffered-io.c
1613
count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
fs/iomap/buffered-io.c
1634
struct address_space *mapping = inode->i_mapping;
fs/iomap/buffered-io.c
1645
range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
fs/iomap/buffered-io.c
177
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
1950
struct address_space *mapping = wpc->inode->i_mapping;
fs/iomap/buffered-io.c
1962
while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
fs/iomap/buffered-io.c
200
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
37
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
415
fserror_report_io(folio->mapping->host, FSERR_BUFFERED_READ,
fs/iomap/buffered-io.c
590
.inode = folio->mapping->host,
fs/iomap/buffered-io.c
658
.inode = rac->mapping->host,
fs/iomap/buffered-io.c
665
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
fs/iomap/buffered-io.c
689
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
69
struct inode *inode = folio->mapping->host;
fs/iomap/buffered-io.c
732
trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
fs/iomap/buffered-io.c
749
trace_iomap_invalidate_folio(folio->mapping->host,
fs/iomap/buffered-io.c
764
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
fs/iomap/buffered-io.c
766
struct inode *inode = mapping->host;
fs/iomap/buffered-io.c
771
return filemap_dirty_folio(mapping, folio);
fs/iomap/buffered-io.c
883
if (unlikely(folio->mapping != iter->inode->i_mapping)) {
fs/iomap/fiemap.c
100
.len = i_blocksize(mapping->host),
fs/iomap/fiemap.c
103
const unsigned int blkshift = mapping->host->i_blkbits - SECTOR_SHIFT;
fs/iomap/fiemap.c
106
if (filemap_write_and_wait(mapping))
fs/iomap/fiemap.c
94
iomap_bmap(struct address_space *mapping, sector_t bno,
fs/iomap/fiemap.c
98
.inode = mapping->host,
fs/iomap/fiemap.c
99
.pos = (loff_t)bno << mapping->host->i_blkbits,
fs/isofs/compress.c
305
struct address_space *mapping = inode->i_mapping;
fs/isofs/compress.c
346
pages[i] = grab_cache_page_nowait(mapping, index);
fs/isofs/inode.c
1157
static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
fs/isofs/inode.c
1159
return generic_block_bmap(mapping,block,isofs_get_block);
fs/isofs/rock.c
697
struct inode *inode = folio->mapping->host;
fs/jbd2/commit.c
1005
mapping = READ_ONCE(bh->b_folio->mapping);
fs/jbd2/commit.c
1006
if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
fs/jbd2/commit.c
242
struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
fs/jbd2/commit.c
244
return filemap_fdatawait_range_keep_errors(mapping,
fs/jbd2/commit.c
72
if (folio->mapping)
fs/jbd2/commit.c
988
struct address_space *mapping;
fs/jbd2/journal.c
2919
(bh->b_folio && bh->b_folio->mapping));
fs/jffs2/file.c
112
int ret = jffs2_do_readpage_nolock(folio->mapping->host, folio);
fs/jffs2/file.c
119
struct jffs2_inode_info *f = JFFS2_INODE_INFO(folio->mapping->host);
fs/jffs2/file.c
129
struct address_space *mapping,
fs/jffs2/file.c
134
struct inode *inode = mapping->host;
fs/jffs2/file.c
212
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/jffs2/file.c
213
mapping_gfp_mask(mapping));
fs/jffs2/file.c
244
struct address_space *mapping,
fs/jffs2/file.c
251
struct inode *inode = mapping->host;
fs/jffs2/file.c
26
struct address_space *mapping,
fs/jffs2/file.c
30
struct address_space *mapping,
fs/jffs2/file.c
305
mapping_set_error(mapping, ret);
fs/jfs/inode.c
273
static int jfs_writepages(struct address_space *mapping,
fs/jfs/inode.c
276
return mpage_writepages(mapping, wbc, jfs_get_block);
fs/jfs/inode.c
289
static void jfs_write_failed(struct address_space *mapping, loff_t to)
fs/jfs/inode.c
291
struct inode *inode = mapping->host;
fs/jfs/inode.c
300
struct address_space *mapping,
fs/jfs/inode.c
306
ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block);
fs/jfs/inode.c
308
jfs_write_failed(mapping, pos + len);
fs/jfs/inode.c
314
struct address_space *mapping,
fs/jfs/inode.c
320
ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/jfs/inode.c
322
jfs_write_failed(mapping, pos + len);
fs/jfs/inode.c
326
static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
fs/jfs/inode.c
328
return generic_block_bmap(mapping, block, jfs_get_block);
fs/jfs/inode.c
334
struct address_space *mapping = file->f_mapping;
fs/jfs/inode.c
350
jfs_write_failed(mapping, end);
fs/jfs/jfs_metapage.c
109
l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
fs/jfs/jfs_metapage.c
121
int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
fs/jfs/jfs_metapage.c
156
static int __metapage_migrate_folio(struct address_space *mapping,
fs/jfs/jfs_metapage.c
171
rc = filemap_migrate_folio(mapping, dst, src, mode);
fs/jfs/jfs_metapage.c
232
static int __metapage_migrate_folio(struct address_space *mapping,
fs/jfs/jfs_metapage.c
244
rc = filemap_migrate_folio(mapping, dst, src, mode);
fs/jfs/jfs_metapage.c
395
mapping_set_error(folio->mapping, err);
fs/jfs/jfs_metapage.c
428
struct inode *inode = folio->mapping->host;
fs/jfs/jfs_metapage.c
549
static int metapage_writepages(struct address_space *mapping,
fs/jfs/jfs_metapage.c
557
while ((folio = writeback_iter(mapping, wbc, folio, &err)))
fs/jfs/jfs_metapage.c
566
struct inode *inode = folio->mapping->host;
fs/jfs/jfs_metapage.c
645
static int metapage_migrate_folio(struct address_space *mapping,
fs/jfs/jfs_metapage.c
652
return filemap_migrate_folio(mapping, dst, src, mode);
fs/jfs/jfs_metapage.c
658
return __metapage_migrate_folio(mapping, dst, src, mode);
fs/jfs/jfs_metapage.c
689
struct address_space *mapping;
fs/jfs/jfs_metapage.c
709
mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
fs/jfs/jfs_metapage.c
718
mapping = inode->i_mapping;
fs/jfs/jfs_metapage.c
722
folio = filemap_grab_folio(mapping, page_index);
fs/jfs/jfs_metapage.c
729
folio = read_mapping_folio(mapping, page_index, NULL);
fs/jfs/jfs_metapage.c
805
struct address_space *mapping = folio->mapping;
fs/jfs/jfs_metapage.c
827
ret = filemap_check_errors(mapping);
fs/jfs/jfs_metapage.c
904
struct address_space *mapping =
fs/jfs/jfs_metapage.c
915
struct folio *folio = filemap_lock_folio(mapping,
fs/libfs.c
2085
struct address_space *mapping = iocb->ki_filp->f_mapping;
fs/libfs.c
2108
err = filemap_write_and_wait_range(mapping, pos, end);
fs/libfs.c
2119
invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
fs/libfs.c
944
int simple_write_begin(const struct kiocb *iocb, struct address_space *mapping,
fs/libfs.c
950
folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
fs/libfs.c
951
mapping_gfp_mask(mapping));
fs/libfs.c
991
struct address_space *mapping,
fs/libfs.c
995
struct inode *inode = folio->mapping->host;
fs/minix/dir.c
288
struct inode *inode = folio->mapping->host;
fs/minix/dir.c
410
struct inode *dir = folio->mapping->host;
fs/minix/dir.c
448
struct inode *inode = folio->mapping->host;
fs/minix/dir.c
45
struct address_space *mapping = folio->mapping;
fs/minix/dir.c
46
struct inode *dir = mapping->host;
fs/minix/inode.c
443
static int minix_writepages(struct address_space *mapping,
fs/minix/inode.c
446
return mpage_writepages(mapping, wbc, minix_get_block);
fs/minix/inode.c
459
static void minix_write_failed(struct address_space *mapping, loff_t to)
fs/minix/inode.c
461
struct inode *inode = mapping->host;
fs/minix/inode.c
470
struct address_space *mapping,
fs/minix/inode.c
476
ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
fs/minix/inode.c
478
minix_write_failed(mapping, pos + len);
fs/minix/inode.c
483
static sector_t minix_bmap(struct address_space *mapping, sector_t block)
fs/minix/inode.c
485
return generic_block_bmap(mapping,block,minix_get_block);
fs/mpage.c
100
struct inode *inode = folio->mapping->host;
fs/mpage.c
154
struct inode *inode = folio->mapping->host;
fs/mpage.c
171
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
fs/mpage.c
458
struct address_space *mapping = folio->mapping;
fs/mpage.c
459
struct inode *inode = mapping->host;
fs/mpage.c
64
mapping_set_error(fi.folio->mapping, err);
fs/mpage.c
642
mapping_set_error(mapping, ret);
fs/mpage.c
663
__mpage_writepages(struct address_space *mapping,
fs/mpage.c
676
while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
fs/namei.c
6283
struct address_space *mapping = inode->i_mapping;
fs/namei.c
6286
folio = filemap_get_folio(mapping, 0);
fs/namei.c
6294
folio = read_mapping_folio(mapping, 0, NULL);
fs/namei.c
6299
BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
fs/namei.c
6369
struct address_space *mapping = inode->i_mapping;
fs/namei.c
6370
const struct address_space_operations *aops = mapping->a_ops;
fs/namei.c
6371
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
fs/namei.c
6380
err = aops->write_begin(NULL, mapping, 0, len-1, &folio, &fsdata);
fs/namei.c
6388
err = aops->write_end(NULL, mapping, 0, len - 1, len - 1,
fs/netfs/buffered_read.c
341
struct netfs_inode *ictx = netfs_inode(ractl->mapping->host);
fs/netfs/buffered_read.c
346
rreq = netfs_alloc_request(ractl->mapping, ractl->file, start, size,
fs/netfs/buffered_read.c
399
struct address_space *mapping = folio->mapping;
fs/netfs/buffered_read.c
401
struct netfs_inode *ctx = netfs_inode(mapping->host);
fs/netfs/buffered_read.c
414
rreq = netfs_alloc_request(mapping, file, folio_pos(folio), flen, NETFS_READ_GAPS);
fs/netfs/buffered_read.c
496
struct address_space *mapping = folio->mapping;
fs/netfs/buffered_read.c
498
struct netfs_inode *ctx = netfs_inode(mapping->host);
fs/netfs/buffered_read.c
508
rreq = netfs_alloc_request(mapping, file,
fs/netfs/buffered_read.c
624
struct file *file, struct address_space *mapping,
fs/netfs/buffered_read.c
634
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/netfs/buffered_read.c
635
mapping_gfp_mask(mapping));
fs/netfs/buffered_read.c
663
rreq = netfs_alloc_request(mapping, file,
fs/netfs/buffered_read.c
719
struct address_space *mapping = folio->mapping;
fs/netfs/buffered_read.c
720
struct netfs_inode *ctx = netfs_inode(mapping->host);
fs/netfs/buffered_read.c
729
rreq = netfs_alloc_request(mapping, file, start, flen,
fs/netfs/buffered_write.c
111
struct address_space *mapping = inode->i_mapping;
fs/netfs/buffered_write.c
125
size_t max_chunk = mapping_max_folio_size(mapping);
fs/netfs/buffered_write.c
130
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
fs/netfs/buffered_write.c
132
ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
fs/netfs/buffered_write.c
179
folio = netfs_grab_folio_for_write(mapping, pos, part);
fs/netfs/buffered_write.c
220
if (mapping_writably_mapped(mapping))
fs/netfs/buffered_write.c
348
ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
fs/netfs/buffered_write.c
374
ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
fs/netfs/buffered_write.c
38
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
fs/netfs/buffered_write.c
44
if (mapping_large_folio_support(mapping))
fs/netfs/buffered_write.c
47
return __filemap_get_folio(mapping, index, fgp_flags,
fs/netfs/buffered_write.c
48
mapping_gfp_mask(mapping));
fs/netfs/buffered_write.c
510
struct address_space *mapping = file->f_mapping;
fs/netfs/buffered_write.c
522
if (folio->mapping != mapping)
fs/netfs/buffered_write.c
536
err = filemap_fdatawrite_range(mapping,
fs/netfs/direct_write.c
28
wreq->mapping->nrpages) {
fs/netfs/direct_write.c
330
struct address_space *mapping = file->f_mapping;
fs/netfs/direct_write.c
331
struct inode *inode = mapping->host;
fs/netfs/direct_write.c
360
if (filemap_range_has_page(mapping, pos, end))
fs/netfs/direct_write.c
364
ret = filemap_write_and_wait_range(mapping, pos, end);
fs/netfs/direct_write.c
38
invalidate_inode_pages2_range(wreq->mapping, first, last);
fs/netfs/fscache_io.c
164
struct address_space *mapping;
fs/netfs/fscache_io.c
173
void __fscache_clear_page_bits(struct address_space *mapping,
fs/netfs/fscache_io.c
181
XA_STATE(xas, &mapping->i_pages, first);
fs/netfs/fscache_io.c
200
fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
fs/netfs/fscache_io.c
210
struct address_space *mapping,
fs/netfs/fscache_io.c
229
wreq->mapping = mapping;
fs/netfs/fscache_io.c
251
iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
fs/netfs/fscache_io.c
261
fscache_clear_page_bits(mapping, start, len, cond);
fs/netfs/internal.h
197
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
fs/netfs/internal.h
83
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
fs/netfs/misc.c
136
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
fs/netfs/misc.c
138
struct inode *inode = mapping->host;
fs/netfs/misc.c
145
if (!filemap_dirty_folio(mapping, folio))
fs/netfs/misc.c
19
int netfs_alloc_folioq_buffer(struct address_space *mapping,
fs/netfs/misc.c
60
folio->mapping = mapping;
fs/netfs/objects.c
18
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
fs/netfs/objects.c
24
struct inode *inode = file ? file_inode(file) : mapping->host;
fs/netfs/objects.c
44
rreq->mapping = mapping;
fs/netfs/read_pgpriv2.c
105
creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
fs/netfs/write_collect.c
265
mapping_set_error(wreq->mapping, front->error);
fs/netfs/write_collect.c
60
struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
fs/netfs/write_issue.c
102
wreq = netfs_alloc_request(mapping, file, start, 0, origin);
fs/netfs/write_issue.c
43
static void netfs_kill_dirty_pages(struct address_space *mapping,
fs/netfs/write_issue.c
550
int netfs_writepages(struct address_space *mapping,
fs/netfs/write_issue.c
553
struct netfs_inode *ictx = netfs_inode(mapping->host);
fs/netfs/write_issue.c
568
folio = writeback_iter(mapping, wbc, NULL, &error);
fs/netfs/write_issue.c
572
wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
fs/netfs/write_issue.c
597
} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
fs/netfs/write_issue.c
609
netfs_kill_dirty_pages(mapping, wbc, folio);
fs/netfs/write_issue.c
822
int netfs_writeback_single(struct address_space *mapping,
fs/netfs/write_issue.c
827
struct netfs_inode *ictx = netfs_inode(mapping->host);
fs/netfs/write_issue.c
84
} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
fs/netfs/write_issue.c
844
wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
fs/netfs/write_issue.c
90
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
fs/nfs/blocklayout/blocklayout.c
890
struct address_space *mapping = inode->i_mapping;
fs/nfs/blocklayout/blocklayout.c
897
end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
fs/nfs/dir.c
1000
u64 change_attr = inode_peek_iversion_raw(mapping->host);
fs/nfs/dir.c
1004
folio = nfs_readdir_folio_get_locked(mapping, cookie, change_attr);
fs/nfs/dir.c
400
static struct folio *nfs_readdir_folio_get_locked(struct address_space *mapping,
fs/nfs/dir.c
406
folio = filemap_grab_folio(mapping, index);
fs/nfs/dir.c
444
static struct folio *nfs_readdir_folio_get_next(struct address_space *mapping,
fs/nfs/dir.c
450
folio = __filemap_get_folio(mapping, index,
fs/nfs/dir.c
452
mapping_gfp_mask(mapping));
fs/nfs/dir.c
830
struct address_space *mapping = desc->file->f_mapping;
fs/nfs/dir.c
854
if (folio->mapping != mapping) {
fs/nfs/dir.c
863
new = nfs_readdir_folio_get_next(mapping, cookie,
fs/nfs/dir.c
999
struct address_space *mapping = desc->file->f_mapping;
fs/nfs/direct.c
1011
nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
fs/nfs/direct.c
1054
if (mapping->nrpages) {
fs/nfs/direct.c
1055
invalidate_inode_pages2_range(mapping,
fs/nfs/direct.c
436
struct address_space *mapping = file->f_mapping;
fs/nfs/direct.c
437
struct inode *inode = mapping->host;
fs/nfs/direct.c
442
nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
fs/nfs/direct.c
994
struct address_space *mapping = file->f_mapping;
fs/nfs/direct.c
995
struct inode *inode = mapping->host;
fs/nfs/file.c
286
void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
fs/nfs/file.c
294
folio = filemap_lock_folio(mapping, from >> PAGE_SHIFT);
fs/nfs/file.c
309
trace_nfs_size_truncate_folio(mapping->host, to);
fs/nfs/file.c
383
struct address_space *mapping,
fs/nfs/file.c
395
file, mapping->host->i_ino, len, (long long) pos);
fs/nfs/file.c
396
nfs_truncate_last_folio(mapping, i_size_read(mapping->host), pos);
fs/nfs/file.c
399
folio = write_begin_get_folio(iocb, mapping, pos >> PAGE_SHIFT, len);
fs/nfs/file.c
425
struct address_space *mapping,
fs/nfs/file.c
436
file, mapping->host->i_ino, len, (long long) pos);
fs/nfs/file.c
467
NFS_I(mapping->host)->write_io += copied;
fs/nfs/file.c
469
if (nfs_ctx_key_to_expire(ctx, mapping->host))
fs/nfs/file.c
470
nfs_wb_all(mapping->host);
fs/nfs/file.c
486
struct inode *inode = folio->mapping->host;
fs/nfs/file.c
514
if (nfs_wb_folio_reclaim(folio->mapping->host, folio) < 0 ||
fs/nfs/file.c
525
struct address_space *mapping = folio->mapping;
fs/nfs/file.c
532
nfsi = NFS_I(mapping->host);
fs/nfs/file.c
557
struct inode *inode = folio->mapping->host;
fs/nfs/file.c
647
struct address_space *mapping;
fs/nfs/file.c
668
mapping = folio->mapping;
fs/nfs/file.c
669
if (mapping != inode->i_mapping)
fs/nfs/fscache.c
254
struct inode *inode = ractl->mapping->host;
fs/nfs/fscache.c
324
xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
fs/nfs/fscache.c
349
struct inode *inode = folio->mapping->host;
fs/nfs/fscache.h
109
fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
fs/nfs/inode.c
146
int nfs_sync_mapping(struct address_space *mapping)
fs/nfs/inode.c
1470
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
fs/nfs/inode.c
1475
if (mapping->nrpages != 0) {
fs/nfs/inode.c
1477
ret = nfs_sync_mapping(mapping);
fs/nfs/inode.c
1481
ret = invalidate_inode_pages2(mapping);
fs/nfs/inode.c
1499
int nfs_clear_invalid_mapping(struct address_space *mapping)
fs/nfs/inode.c
150
if (mapping->nrpages != 0) {
fs/nfs/inode.c
1501
struct inode *inode = mapping->host;
fs/nfs/inode.c
151
unmap_mapping_range(mapping, 0, 0, 0);
fs/nfs/inode.c
152
ret = nfs_wb_all(mapping->host);
fs/nfs/inode.c
1548
ret = nfs_invalidate_mapping(inode, mapping);
fs/nfs/inode.c
1590
int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
fs/nfs/inode.c
1602
return nfs_clear_invalid_mapping(mapping);
fs/nfs/inode.c
260
void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
fs/nfs/inode.c
262
if (mapping->nrpages != 0) {
fs/nfs/internal.h
435
void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
fs/nfs/internal.h
635
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
fs/nfs/internal.h
855
struct inode *inode = folio->mapping->host;
fs/nfs/internal.h
872
loff_t i_size = i_size_read(folio->mapping->host);
fs/nfs/nfs42proc.c
388
struct address_space *mapping = file->f_mapping;
fs/nfs/nfs42proc.c
392
nfs_truncate_last_folio(mapping, oldsize, pos);
fs/nfs/nfs42proc.c
393
WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
fs/nfs/read.c
290
struct inode *inode = folio->mapping->host;
fs/nfs/read.c
412
struct inode *inode = ractl->mapping->host;
fs/nfs/symlink.c
31
struct inode *inode = folio->mapping->host;
fs/nfs/write.c
1064
error = nfs_wb_folio(folio->mapping->host, folio);
fs/nfs/write.c
1142
status = nfs_wb_folio(folio->mapping->host, folio);
fs/nfs/write.c
1216
struct inode *inode = folio->mapping->host;
fs/nfs/write.c
1297
struct address_space *mapping = folio->mapping;
fs/nfs/write.c
1298
struct inode *inode = mapping->host;
fs/nfs/write.c
1323
nfs_set_pageerror(mapping);
fs/nfs/write.c
172
struct address_space *mapping = folio->mapping;
fs/nfs/write.c
177
spin_lock(&mapping->i_private_lock);
fs/nfs/write.c
183
spin_unlock(&mapping->i_private_lock);
fs/nfs/write.c
191
struct inode *inode = folio->mapping->host;
fs/nfs/write.c
1965
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
fs/nfs/write.c
1970
ret = filemap_write_and_wait_range(mapping, lstart, lend);
fs/nfs/write.c
1972
ret = pnfs_sync_inode(mapping->host, true);
fs/nfs/write.c
2103
int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
fs/nfs/write.c
2116
nfs_wb_folio(src->mapping->host, src);
fs/nfs/write.c
2127
return migrate_folio(mapping, dst, src, mode);
fs/nfs/write.c
215
static void nfs_set_pageerror(struct address_space *mapping)
fs/nfs/write.c
217
struct inode *inode = mapping->host;
fs/nfs/write.c
219
nfs_zap_mapping(mapping->host, mapping);
fs/nfs/write.c
230
struct address_space *mapping = folio->mapping;
fs/nfs/write.c
232
filemap_set_wb_err(mapping, error);
fs/nfs/write.c
233
if (mapping->host)
fs/nfs/write.c
234
errseq_set(&mapping->host->i_sb->s_wb_err,
fs/nfs/write.c
236
nfs_set_pageerror(mapping);
fs/nfs/write.c
288
struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);
fs/nfs/write.c
297
struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);
fs/nfs/write.c
513
struct inode *inode = folio->mapping->host;
fs/nfs/write.c
618
nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1);
fs/nfs/write.c
633
struct inode *inode = folio->mapping->host;
fs/nfs/write.c
650
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/nfs/write.c
652
struct inode *inode = mapping->host;
fs/nfs/write.c
687
while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
fs/nfs/write.c
711
struct address_space *mapping = folio->mapping;
fs/nfs/write.c
712
struct nfs_inode *nfsi = NFS_I(mapping->host);
fs/nfs/write.c
718
spin_lock(&mapping->i_private_lock);
fs/nfs/write.c
722
spin_unlock(&mapping->i_private_lock);
fs/nfs/write.c
742
struct address_space *mapping = folio->mapping;
fs/nfs/write.c
744
spin_lock(&mapping->i_private_lock);
fs/nfs/write.c
750
spin_unlock(&mapping->i_private_lock);
fs/nfs/write.c
875
wb_stat_mod(&inode_to_bdi(folio->mapping->host)->wb,
fs/nfsd/filecache.c
307
struct address_space *mapping;
fs/nfsd/filecache.c
320
mapping = file->f_mapping;
fs/nfsd/filecache.c
321
return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
fs/nfsd/filecache.c
322
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
fs/nilfs2/btnode.c
161
struct address_space *mapping;
fs/nilfs2/btnode.c
172
mapping = folio->mapping;
fs/nilfs2/btnode.c
176
if (!still_dirty && mapping)
fs/nilfs2/btnode.c
177
invalidate_inode_pages2_range(mapping, index, index);
fs/nilfs2/btree.c
402
inode = bh->b_folio->mapping->host;
fs/nilfs2/dir.c
112
struct inode *dir = folio->mapping->host;
fs/nilfs2/dir.c
190
struct address_space *mapping = dir->i_mapping;
fs/nilfs2/dir.c
191
struct folio *folio = read_mapping_folio(mapping, n, NULL);
fs/nilfs2/dir.c
409
struct address_space *mapping = folio->mapping;
fs/nilfs2/dir.c
420
nilfs_commit_chunk(folio, mapping, from, to);
fs/nilfs2/dir.c
508
nilfs_commit_chunk(folio, folio->mapping, from, to);
fs/nilfs2/dir.c
526
struct address_space *mapping = folio->mapping;
fs/nilfs2/dir.c
527
struct inode *inode = mapping->host;
fs/nilfs2/dir.c
558
nilfs_commit_chunk(folio, mapping, from, to);
fs/nilfs2/dir.c
569
struct address_space *mapping = inode->i_mapping;
fs/nilfs2/dir.c
570
struct folio *folio = filemap_grab_folio(mapping, 0);
fs/nilfs2/dir.c
600
nilfs_commit_chunk(folio, mapping, 0, chunk_size);
fs/nilfs2/dir.c
91
struct address_space *mapping, size_t from, size_t to)
fs/nilfs2/dir.c
93
struct inode *dir = mapping->host;
fs/nilfs2/file.c
60
if (folio->mapping != inode->i_mapping ||
fs/nilfs2/gcinode.c
137
struct inode *inode = bh->b_folio->mapping->host;
fs/nilfs2/inode.c
159
static int nilfs_writepages(struct address_space *mapping,
fs/nilfs2/inode.c
162
struct inode *inode = mapping->host;
fs/nilfs2/inode.c
166
nilfs_clear_dirty_pages(mapping);
fs/nilfs2/inode.c
177
static bool nilfs_dirty_folio(struct address_space *mapping,
fs/nilfs2/inode.c
180
struct inode *inode = mapping->host;
fs/nilfs2/inode.c
183
bool ret = filemap_dirty_folio(mapping, folio);
fs/nilfs2/inode.c
188
spin_lock(&mapping->i_private_lock);
fs/nilfs2/inode.c
204
spin_unlock(&mapping->i_private_lock);
fs/nilfs2/inode.c
211
void nilfs_write_failed(struct address_space *mapping, loff_t to)
fs/nilfs2/inode.c
213
struct inode *inode = mapping->host;
fs/nilfs2/inode.c
222
struct address_space *mapping,
fs/nilfs2/inode.c
227
struct inode *inode = mapping->host;
fs/nilfs2/inode.c
233
err = block_write_begin(mapping, pos, len, foliop, nilfs_get_block);
fs/nilfs2/inode.c
235
nilfs_write_failed(mapping, pos + len);
fs/nilfs2/inode.c
242
struct address_space *mapping,
fs/nilfs2/inode.c
246
struct inode *inode = mapping->host;
fs/nilfs2/inode.c
253
copied = generic_write_end(iocb, mapping, pos, len, copied, folio,
fs/nilfs2/mdt.c
399
struct inode *inode = folio->mapping->host;
fs/nilfs2/mdt.c
429
static int nilfs_mdt_writeback(struct address_space *mapping,
fs/nilfs2/mdt.c
435
while ((folio = writeback_iter(mapping, wbc, folio, &error)))
fs/nilfs2/nilfs.h
305
extern void nilfs_write_failed(struct address_space *mapping, loff_t to);
fs/nilfs2/page.c
164
m = folio->mapping;
fs/nilfs2/page.c
338
folio->mapping = NULL;
fs/nilfs2/page.c
341
folio->mapping = dmap;
fs/nilfs2/page.c
361
void nilfs_clear_dirty_pages(struct address_space *mapping)
fs/nilfs2/page.c
369
while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
fs/nilfs2/page.c
381
if (likely(folio->mapping == mapping))
fs/nilfs2/page.c
47
struct address_space *mapping,
fs/nilfs2/page.c
476
struct address_space *mapping = folio->mapping;
fs/nilfs2/page.c
478
if (mapping) {
fs/nilfs2/page.c
479
xa_lock_irq(&mapping->i_pages);
fs/nilfs2/page.c
481
__xa_clear_mark(&mapping->i_pages, folio->index,
fs/nilfs2/page.c
483
xa_unlock_irq(&mapping->i_pages);
fs/nilfs2/page.c
487
xa_unlock_irq(&mapping->i_pages);
fs/nilfs2/page.c
56
folio = filemap_grab_folio(mapping, index);
fs/nilfs2/page.h
45
void nilfs_clear_dirty_pages(struct address_space *mapping);
fs/nilfs2/segment.c
1629
inode = bh->b_folio->mapping->host;
fs/nilfs2/segment.c
1811
filemap_dirty_folio(folio->mapping, folio);
fs/nilfs2/segment.c
703
struct address_space *mapping = inode->i_mapping;
fs/nilfs2/segment.c
721
!filemap_get_folios_tag(mapping, &index, last,
fs/nilfs2/segment.c
730
if (unlikely(folio->mapping != mapping)) {
fs/ntfs3/attrib.c
319
struct address_space *mapping = ni->vfs_inode.i_mapping;
fs/ntfs3/attrib.c
323
mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
fs/ntfs3/attrib.c
324
mapping_gfp_mask(mapping));
fs/ntfs3/bitmap.c
512
struct address_space *mapping = sb->s_bdev->bd_mapping;
fs/ntfs3/bitmap.c
524
file_ra_state_init(ra, mapping);
fs/ntfs3/bitmap.c
567
page_cache_sync_readahead(mapping, ra, NULL,
fs/ntfs3/file.c
1041
err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
fs/ntfs3/file.c
1104
err = ntfs_get_frame_pages(mapping, index, pages,
fs/ntfs3/file.c
354
struct address_space *mapping = inode->i_mapping;
fs/ntfs3/file.c
389
err = filemap_fdatawrite_range(mapping, pos, end - 1);
fs/ntfs3/file.c
390
err2 = sync_mapping_buffers(mapping);
fs/ntfs3/file.c
397
err = filemap_fdatawait_range(mapping, pos, end - 1);
fs/ntfs3/file.c
452
struct address_space *mapping = inode->i_mapping;
fs/ntfs3/file.c
503
filemap_invalidate_lock(mapping);
fs/ntfs3/file.c
511
err = filemap_write_and_wait_range(mapping, vbo_down,
fs/ntfs3/file.c
568
err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
fs/ntfs3/file.c
576
err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
fs/ntfs3/file.c
594
err = filemap_write_and_wait_range(mapping, vbo_down,
fs/ntfs3/file.c
707
filemap_invalidate_unlock(mapping);
fs/ntfs3/file.c
939
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
fs/ntfs3/file.c
943
gfp_t gfp_mask = mapping_gfp_mask(mapping);
fs/ntfs3/file.c
951
folio = __filemap_get_folio(mapping, index,
fs/ntfs3/file.c
984
struct address_space *mapping = inode->i_mapping;
fs/ntfs3/frecord.c
1855
static struct page *ntfs_lock_new_page(struct address_space *mapping,
fs/ntfs3/frecord.c
1859
mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
fs/ntfs3/frecord.c
1888
struct address_space *mapping = folio->mapping;
fs/ntfs3/frecord.c
1926
gfp_mask = mapping_gfp_mask(mapping);
fs/ntfs3/frecord.c
1932
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
fs/ntfs3/frecord.c
1973
struct address_space *mapping = inode->i_mapping;
fs/ntfs3/frecord.c
1974
gfp_t gfp_mask = mapping_gfp_mask(mapping);
fs/ntfs3/frecord.c
2028
pg = ntfs_lock_new_page(mapping, index, gfp_mask);
fs/ntfs3/frecord.c
2150
mapping->a_ops = &ntfs_aops;
fs/ntfs3/fsntfs.c
1190
struct address_space *mapping = sb->s_bdev->bd_mapping;
fs/ntfs3/fsntfs.c
1231
file_ra_state_init(ra, mapping);
fs/ntfs3/fsntfs.c
1240
page_cache_sync_readahead(mapping, ra, NULL,
fs/ntfs3/fsntfs.c
1516
struct address_space *mapping = sb->s_bdev->bd_mapping;
fs/ntfs3/fsntfs.c
1541
folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
fs/ntfs3/inode.c
1042
struct address_space *mapping = folio->mapping;
fs/ntfs3/inode.c
1043
struct inode *inode = mapping->host;
fs/ntfs3/inode.c
1060
mapping_set_error(mapping, ret);
fs/ntfs3/inode.c
1064
static int ntfs_writepages(struct address_space *mapping,
fs/ntfs3/inode.c
1068
struct inode *inode = mapping->host;
fs/ntfs3/inode.c
1071
.inode = mapping->host,
fs/ntfs3/inode.c
1086
while ((folio = writeback_iter(mapping, wbc, folio, &err)))
fs/ntfs3/inode.c
1112
struct address_space *mapping = inode->i_mapping;
fs/ntfs3/inode.c
1116
struct page *page = read_mapping_page(mapping, idx, NULL);
fs/ntfs3/inode.c
557
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
fs/ntfs3/inode.c
559
struct inode *inode = mapping->host;
fs/ntfs3/inode.c
568
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
fs/ntfs3/inode.c
578
return iomap_bmap(mapping, block, &ntfs_iomap_ops);
fs/ntfs3/inode.c
588
struct inode *inode = folio->mapping->host;
fs/ntfs3/inode.c
626
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
fs/ntfs3/inode.c
670
struct address_space *mapping = folio->mapping;
fs/ntfs3/inode.c
671
struct inode *inode = mapping->host;
fs/ntfs3/inode.c
703
struct address_space *mapping = rac->mapping;
fs/ntfs3/inode.c
704
struct inode *inode = mapping->host;
fs/ocfs2/alloc.c
6908
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/alloc.c
6918
folios[numfolios] = __filemap_get_folio(mapping, index,
fs/ocfs2/aops.c
1002
static int ocfs2_grab_folios_for_write(struct address_space *mapping,
fs/ocfs2/aops.c
1008
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
1052
if (mmap_folio->mapping != mapping) {
fs/ocfs2/aops.c
1053
WARN_ON(mmap_folio->mapping);
fs/ocfs2/aops.c
1068
wc->w_folios[i] = __filemap_get_folio(mapping, index,
fs/ocfs2/aops.c
1092
static int ocfs2_write_cluster(struct address_space *mapping,
fs/ocfs2/aops.c
1103
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
1195
static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
fs/ocfs2/aops.c
1205
struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
fs/ocfs2/aops.c
1219
ret = ocfs2_write_cluster(mapping, &desc->c_phys,
fs/ocfs2/aops.c
1448
static int ocfs2_write_begin_inline(struct address_space *mapping,
fs/ocfs2/aops.c
1465
folio = __filemap_get_folio(mapping, 0,
fs/ocfs2/aops.c
1516
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
fs/ocfs2/aops.c
1562
ret = ocfs2_write_begin_inline(mapping, inode, wc);
fs/ocfs2/aops.c
1623
int ocfs2_write_begin_nolock(struct address_space *mapping,
fs/ocfs2/aops.c
1631
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
1648
ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
fs/ocfs2/aops.c
1776
ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
fs/ocfs2/aops.c
1795
ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
fs/ocfs2/aops.c
1860
struct address_space *mapping,
fs/ocfs2/aops.c
1866
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
1883
ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER,
fs/ocfs2/aops.c
1925
int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
fs/ocfs2/aops.c
1930
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
2052
struct address_space *mapping,
fs/ocfs2/aops.c
2057
struct inode *inode = mapping->host;
fs/ocfs2/aops.c
2059
ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata);
fs/ocfs2/aops.c
264
struct inode *inode = folio->mapping->host;
fs/ocfs2/aops.c
337
struct inode *inode = rac->mapping->host;
fs/ocfs2/aops.c
384
static int ocfs2_writepages(struct address_space *mapping,
fs/ocfs2/aops.c
387
return mpage_writepages(mapping, wbc, ocfs2_get_block);
fs/ocfs2/aops.c
426
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
fs/ocfs2/aops.c
431
struct inode *inode = mapping->host;
fs/ocfs2/aops.h
25
int ocfs2_write_end_nolock(struct address_space *mapping,
fs/ocfs2/aops.h
34
int ocfs2_write_begin_nolock(struct address_space *mapping,
fs/ocfs2/dlmglue.c
3945
struct address_space *mapping;
fs/ocfs2/dlmglue.c
3949
mapping = inode->i_mapping;
fs/ocfs2/dlmglue.c
3968
unmap_mapping_range(mapping, 0, 0, 0);
fs/ocfs2/dlmglue.c
3970
if (filemap_fdatawrite(mapping)) {
fs/ocfs2/dlmglue.c
3974
sync_mapping_buffers(mapping);
fs/ocfs2/dlmglue.c
3976
truncate_inode_pages(mapping, 0);
fs/ocfs2/dlmglue.c
3983
filemap_fdatawait(mapping);
fs/ocfs2/file.c
1531
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/file.c
1538
unmap_mapping_range(mapping, start, end - start, 0);
fs/ocfs2/file.c
1539
truncate_inode_pages_range(mapping, start, end - 1);
fs/ocfs2/file.c
1777
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/file.c
1817
unmap_mapping_range(mapping, 0, 0, 0);
fs/ocfs2/file.c
1818
truncate_inode_pages(mapping, 0);
fs/ocfs2/file.c
758
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/file.c
778
folio = __filemap_get_folio(mapping, index,
fs/ocfs2/mmap.c
106
err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
fs/ocfs2/mmap.c
52
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/mmap.c
75
if ((folio->mapping != inode->i_mapping) ||
fs/ocfs2/mmap.c
93
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
fs/ocfs2/refcounttree.c
2909
struct address_space *mapping = inode->i_mapping;
fs/ocfs2/refcounttree.c
2937
folio = __filemap_get_folio(mapping, page_index,
fs/ocfs2/refcounttree.c
2954
ret = filemap_write_and_wait_range(mapping,
fs/ocfs2/symlink.c
57
struct inode *inode = folio->mapping->host;
fs/omfs/file.c
298
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
fs/omfs/file.c
300
return mpage_writepages(mapping, wbc, omfs_get_block);
fs/omfs/file.c
303
static void omfs_write_failed(struct address_space *mapping, loff_t to)
fs/omfs/file.c
305
struct inode *inode = mapping->host;
fs/omfs/file.c
314
struct address_space *mapping,
fs/omfs/file.c
320
ret = block_write_begin(mapping, pos, len, foliop, omfs_get_block);
fs/omfs/file.c
322
omfs_write_failed(mapping, pos + len);
fs/omfs/file.c
327
static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
fs/omfs/file.c
329
return generic_block_bmap(mapping, block, omfs_get_block);
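The omfs, udf, ufs and nilfs2 entries above all share the block_write_begin()/write_failed() pairing: if allocating blocks for a buffered write fails partway, a helper trims anything instantiated beyond i_size. Below is a minimal sketch of that pattern, not code from the tree: my_get_block, my_write_failed and my_write_begin are hypothetical stand-ins, and the signatures mirror the block_write_begin() and ->write_begin() declarations quoted from include/linux/buffer_head.h and include/linux/fs.h elsewhere in this index.

	#include <linux/buffer_head.h>
	#include <linux/mm.h>

	/* hypothetical block mapper with the get_block_t signature */
	static int my_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

	static void my_write_failed(struct address_space *mapping, loff_t to)
	{
		struct inode *inode = mapping->host;

		/* drop page cache (real filesystems also free blocks)
		 * instantiated past the old end of file */
		if (to > inode->i_size)
			truncate_pagecache(inode, inode->i_size);
	}

	static int my_write_begin(const struct kiocb *iocb,
				  struct address_space *mapping,
				  loff_t pos, unsigned len,
				  struct folio **foliop, void **fsdata)
	{
		int ret = block_write_begin(mapping, pos, len, foliop,
					    my_get_block);

		if (unlikely(ret))
			my_write_failed(mapping, pos + len);
		return ret;
	}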
fs/open.c
985
struct address_space *mapping = inode->i_mapping;
fs/open.c
994
unmap_mapping_range(mapping, 0, 0, 0);
fs/open.c
995
truncate_inode_pages(mapping, 0);
fs/orangefs/file.c
285
struct address_space *mapping = inode->i_mapping;
fs/orangefs/file.c
308
unmap_mapping_range(mapping, 0, 0, 0);
fs/orangefs/file.c
309
ret = filemap_write_and_wait(mapping);
fs/orangefs/file.c
311
ret = invalidate_inode_pages2(mapping);
fs/orangefs/inode.c
110
mapping_set_error(ow->mapping, ret);
fs/orangefs/inode.c
167
mapping_set_error(folio->mapping, ret);
fs/orangefs/inode.c
179
static int orangefs_writepages(struct address_space *mapping,
fs/orangefs/inode.c
202
ow->mapping = mapping;
fs/orangefs/inode.c
204
while ((folio = writeback_iter(mapping, wbc, folio, &error)))
fs/orangefs/inode.c
22
struct inode *inode = folio->mapping->host;
fs/orangefs/inode.c
221
struct inode *inode = rac->mapping->host;
fs/orangefs/inode.c
240
i_pages = &rac->mapping->i_pages;
fs/orangefs/inode.c
263
struct inode *inode = folio->mapping->host;
fs/orangefs/inode.c
289
struct address_space *mapping, loff_t pos,
fs/orangefs/inode.c
297
folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
fs/orangefs/inode.c
298
mapping_gfp_mask(mapping));
fs/orangefs/inode.c
345
struct address_space *mapping,
fs/orangefs/inode.c
349
struct inode *inode = folio->mapping->host;
fs/orangefs/inode.c
58
mapping_set_error(folio->mapping, ret);
fs/orangefs/inode.c
660
if (folio->mapping != inode->i_mapping) {
fs/orangefs/inode.c
73
struct address_space *mapping;
fs/orangefs/inode.c
81
struct inode *inode = ow->mapping->host;
fs/proc/page.c
151
unsigned long mapping;
fs/proc/page.c
166
mapping = (unsigned long)folio->mapping;
fs/proc/page.c
167
is_anon = mapping & FOLIO_MAPPING_ANON;
fs/proc/page.c
176
if (mapping & FOLIO_MAPPING_KSM)
fs/proc/vmcore.c
473
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
fs/proc/vmcore.c
481
page = find_or_create_page(mapping, index, GFP_KERNEL);
fs/qnx4/inode.c
272
static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
fs/qnx4/inode.c
274
return generic_block_bmap(mapping,block,qnx4_get_block);
fs/qnx6/dir.c
56
struct address_space *mapping = sbi->longfile->i_mapping;
fs/qnx6/dir.c
57
struct folio *folio = read_mapping_folio(mapping, n, NULL);
fs/qnx6/inode.c
185
struct address_space *mapping = root->i_mapping;
fs/qnx6/inode.c
186
struct folio *folio = read_mapping_folio(mapping, 0, NULL);
fs/qnx6/inode.c
485
static sector_t qnx6_bmap(struct address_space *mapping, sector_t block)
fs/qnx6/inode.c
487
return generic_block_bmap(mapping, block, qnx6_get_block);
fs/qnx6/inode.c
517
struct address_space *mapping;
fs/qnx6/inode.c
538
mapping = sbi->inodes->i_mapping;
fs/qnx6/inode.c
539
folio = read_mapping_folio(mapping, n, NULL);
fs/remap_range.c
229
src_folio->mapping != src->f_mapping ||
fs/remap_range.c
230
dst_folio->mapping != dest->f_mapping) {
fs/romfs/super.c
104
struct inode *inode = folio->mapping->host;
fs/smb/client/file.c
219
__func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
fs/smb/server/vfs.c
898
struct address_space *mapping;
fs/smb/server/vfs.c
900
mapping = filp->f_mapping;
fs/smb/server/vfs.c
902
if (!option || !mapping)
fs/smb/server/vfs.c
908
filp->f_ra.ra_pages = inode_to_bdi(mapping->host)->ra_pages * 2;
fs/splice.c
132
if (!folio->mapping) {
fs/splice.c
68
struct address_space *mapping;
fs/splice.c
72
mapping = folio_mapping(folio);
fs/splice.c
73
if (mapping) {
fs/splice.c
93
if (remove_mapping(mapping, folio)) {
fs/squashfs/block.c
196
static struct page *squashfs_get_cache_page(struct address_space *mapping,
fs/squashfs/block.c
201
if (!mapping)
fs/squashfs/block.c
204
page = find_get_page(mapping, index);
fs/squashfs/block.c
98
if (folio->mapping == cache_mapping) {
fs/squashfs/file.c
396
struct address_space *mapping = folio->mapping;
fs/squashfs/file.c
397
struct inode *inode = mapping->host;
fs/squashfs/file.c
417
__filemap_get_folio(mapping, i,
fs/squashfs/file.c
419
mapping_gfp_mask(mapping));
fs/squashfs/file.c
438
struct inode *inode = folio->mapping->host;
fs/squashfs/file.c
464
struct inode *inode = folio->mapping->host;
fs/squashfs/file.c
576
struct inode *inode = ractl->mapping->host;
fs/squashfs/file_cache.c
23
struct inode *i = folio->mapping->host;
fs/squashfs/file_direct.c
26
struct inode *inode = folio->mapping->host;
fs/squashfs/file_direct.c
50
grab_cache_page_nowait(folio->mapping, index);
fs/squashfs/symlink.c
35
struct inode *inode = folio->mapping->host;
fs/sync.c
228
struct address_space *mapping;
fs/sync.c
273
mapping = file->f_mapping;
fs/sync.c
284
ret = filemap_fdatawrite_range(mapping, offset,
fs/sync.c
287
ret = filemap_flush_range(mapping, offset, endbyte);
fs/ubifs/file.c
104
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
1051
static int ubifs_writepages(struct address_space *mapping,
fs/ubifs/file.c
1057
while ((folio = writeback_iter(mapping, wbc, folio, &error)))
fs/ubifs/file.c
1287
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
1443
static bool ubifs_dirty_folio(struct address_space *mapping,
fs/ubifs/file.c
1447
struct ubifs_info *c = mapping->host->i_sb->s_fs_info;
fs/ubifs/file.c
1449
ret = filemap_dirty_folio(mapping, folio);
fs/ubifs/file.c
1460
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
1540
if (unlikely(folio->mapping != inode->i_mapping ||
fs/ubifs/file.c
1553
filemap_dirty_folio(folio->mapping, folio);
fs/ubifs/file.c
205
static int write_begin_slow(struct address_space *mapping,
fs/ubifs/file.c
208
struct inode *inode = mapping->host;
fs/ubifs/file.c
234
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/ubifs/file.c
235
mapping_gfp_mask(mapping));
fs/ubifs/file.c
408
struct address_space *mapping,
fs/ubifs/file.c
412
struct inode *inode = mapping->host;
fs/ubifs/file.c
427
folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
fs/ubifs/file.c
428
mapping_gfp_mask(mapping));
fs/ubifs/file.c
479
return write_begin_slow(mapping, pos, len, foliop);
fs/ubifs/file.c
519
struct address_space *mapping, loff_t pos,
fs/ubifs/file.c
523
struct inode *inode = mapping->host;
fs/ubifs/file.c
561
filemap_dirty_folio(mapping, folio);
fs/ubifs/file.c
596
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
700
struct address_space *mapping = folio1->mapping;
fs/ubifs/file.c
701
struct inode *inode = mapping->host;
fs/ubifs/file.c
706
gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
fs/ubifs/file.c
768
folio = __filemap_get_folio(mapping, page_offset,
fs/ubifs/file.c
810
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
889
struct inode *inode = folio->mapping->host;
fs/ubifs/file.c
915
mapping_set_error(folio->mapping, err);
fs/ubifs/file.c
984
struct inode *inode = folio->mapping->host;
fs/udf/file.c
42
struct address_space *mapping = inode->i_mapping;
fs/udf/file.c
51
filemap_invalidate_lock_shared(mapping);
fs/udf/file.c
54
if (folio->mapping != inode->i_mapping || folio_pos(folio) >= size) {
fs/udf/file.c
78
filemap_invalidate_unlock_shared(mapping);
fs/udf/inode.c
167
static void udf_write_failed(struct address_space *mapping, loff_t to)
fs/udf/inode.c
169
struct inode *inode = mapping->host;
fs/udf/inode.c
187
struct inode *inode = folio->mapping->host;
fs/udf/inode.c
205
static int udf_writepages(struct address_space *mapping,
fs/udf/inode.c
208
return __mpage_writepages(mapping, wbc, udf_get_block_wb,
fs/udf/inode.c
214
struct inode *inode = folio->mapping->host;
fs/udf/inode.c
236
struct udf_inode_info *iinfo = UDF_I(rac->mapping->host);
fs/udf/inode.c
249
struct address_space *mapping,
fs/udf/inode.c
259
ret = block_write_begin(mapping, pos, len, foliop,
fs/udf/inode.c
262
udf_write_failed(mapping, pos + len);
fs/udf/inode.c
267
folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
fs/udf/inode.c
268
mapping_gfp_mask(mapping));
fs/udf/inode.c
278
struct address_space *mapping,
fs/udf/inode.c
286
return generic_write_end(iocb, mapping, pos, len, copied, folio,
fs/udf/inode.c
301
struct address_space *mapping = file->f_mapping;
fs/udf/inode.c
302
struct inode *inode = mapping->host;
fs/udf/inode.c
311
udf_write_failed(mapping, iocb->ki_pos + count);
fs/udf/inode.c
315
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
fs/udf/inode.c
317
struct udf_inode_info *iinfo = UDF_I(mapping->host);
fs/udf/inode.c
321
return generic_block_bmap(mapping, block, udf_get_block);
fs/udf/symlink.c
102
struct inode *inode = folio->mapping->host;
fs/ufs/balloc.c
242
struct address_space * const mapping = inode->i_mapping;
fs/ufs/balloc.c
261
folio = ufs_get_locked_folio(mapping, index);
fs/ufs/dir.c
112
struct inode *dir = folio->mapping->host;
fs/ufs/dir.c
190
struct address_space *mapping = dir->i_mapping;
fs/ufs/dir.c
191
struct folio *folio = read_mapping_folio(mapping, n, NULL);
fs/ufs/dir.c
48
struct address_space *mapping = folio->mapping;
fs/ufs/dir.c
49
struct inode *dir = mapping->host;
fs/ufs/dir.c
536
struct address_space *mapping = inode->i_mapping;
fs/ufs/dir.c
537
struct folio *folio = filemap_grab_folio(mapping, 0);
fs/ufs/inode.c
1038
struct address_space *mapping = inode->i_mapping;
fs/ufs/inode.c
1053
folio = ufs_get_locked_folio(mapping, lastfrag >>
fs/ufs/inode.c
449
static int ufs_writepages(struct address_space *mapping,
fs/ufs/inode.c
452
return mpage_writepages(mapping, wbc, ufs_getfrag_block);
fs/ufs/inode.c
467
static void ufs_write_failed(struct address_space *mapping, loff_t to)
fs/ufs/inode.c
469
struct inode *inode = mapping->host;
fs/ufs/inode.c
478
struct address_space *mapping,
fs/ufs/inode.c
484
ret = block_write_begin(mapping, pos, len, foliop, ufs_getfrag_block);
fs/ufs/inode.c
486
ufs_write_failed(mapping, pos + len);
fs/ufs/inode.c
492
struct address_space *mapping,
fs/ufs/inode.c
498
ret = generic_write_end(iocb, mapping, pos, len, copied, folio, fsdata);
fs/ufs/inode.c
500
ufs_write_failed(mapping, pos + len);
fs/ufs/inode.c
504
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
fs/ufs/inode.c
506
return generic_block_bmap(mapping,block,ufs_getfrag_block);
fs/ufs/util.c
197
struct folio *ufs_get_locked_folio(struct address_space *mapping,
fs/ufs/util.c
200
struct inode *inode = mapping->host;
fs/ufs/util.c
201
struct folio *folio = filemap_lock_folio(mapping, index);
fs/ufs/util.c
203
folio = read_mapping_folio(mapping, index, NULL);
fs/ufs/util.c
207
mapping->host->i_ino, index);
fs/ufs/util.c
213
if (unlikely(folio->mapping == NULL)) {
fs/ufs/util.h
271
struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index);
fs/unicode/mkutf8data.c
2120
unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */
fs/unicode/mkutf8data.c
2155
mapping[i] = strtoul(s, &s, 16);
fs/unicode/mkutf8data.c
2156
if (!utf32valid(mapping[i]))
fs/unicode/mkutf8data.c
2160
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2163
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2181
unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */
fs/unicode/mkutf8data.c
2211
mapping[i] = strtoul(s, &s, 16);
fs/unicode/mkutf8data.c
2212
if (!utf32valid(mapping[i]))
fs/unicode/mkutf8data.c
2216
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2219
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2311
unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */
fs/unicode/mkutf8data.c
2355
mapping[i] = strtoul(s, &s, 16);
fs/unicode/mkutf8data.c
2356
if (!utf32valid(mapping[i]))
fs/unicode/mkutf8data.c
2360
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2363
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2439
unsigned int mapping[4];
fs/unicode/mkutf8data.c
2455
mapping[i++] = lb + li;
fs/unicode/mkutf8data.c
2456
mapping[i++] = vb + vi;
fs/unicode/mkutf8data.c
2458
mapping[i++] = tb + ti;
fs/unicode/mkutf8data.c
2459
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2463
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2468
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2492
unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */
fs/unicode/mkutf8data.c
2515
mapping[i++] = dc[j];
fs/unicode/mkutf8data.c
2518
mapping[i++] = *um;
fs/unicode/mkutf8data.c
2522
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2527
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2533
memcpy(um, mapping, i * sizeof(unsigned int));
fs/unicode/mkutf8data.c
2547
unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */
fs/unicode/mkutf8data.c
2569
mapping[i++] = dc[j];
fs/unicode/mkutf8data.c
2572
mapping[i++] = *um;
fs/unicode/mkutf8data.c
2576
mapping[i++] = 0;
fs/unicode/mkutf8data.c
2581
memcpy(um, mapping, i * sizeof(unsigned int));
fs/vboxsf/file.c
264
static int vboxsf_writepages(struct address_space *mapping,
fs/vboxsf/file.c
267
struct inode *inode = mapping->host;
fs/vboxsf/file.c
278
while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
fs/vboxsf/file.c
303
struct address_space *mapping,
fs/vboxsf/file.c
307
struct inode *inode = mapping->host;
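The ubifs, vboxsf, orangefs, ntfs3 and nilfs2 entries above all drive writeback through the same writeback_iter() loop. A minimal sketch of a ->writepages() built on it follows; my_write_folio() is a hypothetical per-folio writer, not a function from the tree, and like the real callers it is expected to unlock the folio it is handed.

	#include <linux/writeback.h>
	#include <linux/pagemap.h>

	/* hypothetical: writes one dirty folio and unlocks it */
	static int my_write_folio(struct folio *folio,
				  struct writeback_control *wbc);

	static int my_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
	{
		struct folio *folio = NULL;
		int error = 0;

		/* writeback_iter() walks the dirty folios selected by wbc,
		 * returning each one locked; passing the previous folio and
		 * &error back in lets it finish the walk cleanly on failure */
		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
			error = my_write_folio(folio, wbc);

		return error;
	}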
fs/xfs/scrub/bmap.c
55
struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
fs/xfs/scrub/bmap.c
83
error = filemap_fdatawrite(mapping);
fs/xfs/scrub/bmap.c
85
error = filemap_fdatawait_keep_errors(mapping);
fs/xfs/xfs_aops.c
262
struct xfs_inode *ip = XFS_I(folio->mapping->host);
fs/xfs/xfs_aops.c
675
struct address_space *mapping,
fs/xfs/xfs_aops.c
678
struct xfs_inode *ip = XFS_I(mapping->host);
fs/xfs/xfs_aops.c
685
.inode = mapping->host,
fs/xfs/xfs_aops.c
699
.inode = mapping->host,
fs/xfs/xfs_aops.c
711
struct address_space *mapping,
fs/xfs/xfs_aops.c
714
struct xfs_inode *ip = XFS_I(mapping->host);
fs/xfs/xfs_aops.c
717
return dax_writeback_mapping_range(mapping,
fs/xfs/xfs_aops.c
723
struct address_space *mapping,
fs/xfs/xfs_aops.c
726
struct xfs_inode *ip = XFS_I(mapping->host);
fs/xfs/xfs_aops.c
741
return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
fs/xfs/xfs_notify_failure.c
107
mapping = VFS_I(ip)->i_mapping;
fs/xfs/xfs_notify_failure.c
112
if (dax_mapping(mapping))
fs/xfs/xfs_notify_failure.c
113
error = mf_dax_kill_procs(mapping, pgoff, pgcnt,
fs/xfs/xfs_notify_failure.c
118
invalidate_inode_pages2_range(mapping, pgoff,
fs/xfs/xfs_notify_failure.c
82
struct address_space *mapping;
fs/zonefs/file.c
158
static int zonefs_writepages(struct address_space *mapping,
fs/zonefs/file.c
162
.inode = mapping->host,
include/asm-generic/cacheflush.h
59
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
include/asm-generic/cacheflush.h
65
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
include/drm/ttm/ttm_device.h
299
struct device *dev, struct address_space *mapping,
include/linux/backing-dev.h
134
static inline bool mapping_can_writeback(struct address_space *mapping)
include/linux/backing-dev.h
136
return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
include/linux/buffer_head.h
261
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
include/linux/buffer_head.h
512
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
include/linux/buffer_head.h
521
int sync_mapping_buffers(struct address_space *mapping);
include/linux/buffer_head.h
534
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
include/linux/dax.h
154
int dax_writeback_mapping_range(struct address_space *mapping,
include/linux/dax.h
157
struct page *dax_layout_busy_page(struct address_space *mapping);
include/linux/dax.h
158
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
include/linux/dax.h
161
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
include/linux/dax.h
163
void dax_unlock_mapping_entry(struct address_space *mapping,
include/linux/dax.h
166
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
include/linux/dax.h
171
static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)
include/linux/dax.h
176
static inline int dax_writeback_mapping_range(struct address_space *mapping,
include/linux/dax.h
184
if (IS_DAX(folio->mapping->host))
include/linux/dax.h
193
static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
include/linux/dax.h
199
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
include/linux/dax.h
264
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
include/linux/dax.h
265
void dax_delete_mapping_range(struct address_space *mapping,
include/linux/dax.h
267
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
include/linux/dax.h
285
static inline bool dax_mapping(struct address_space *mapping)
include/linux/dax.h
287
return mapping->host && IS_DAX(mapping->host);
include/linux/fb.h
223
struct address_space *mapping; /* page cache object for fb device */
include/linux/fs.h
1081
static inline void filemap_invalidate_lock(struct address_space *mapping)
include/linux/fs.h
1083
down_write(&mapping->invalidate_lock);
include/linux/fs.h
1086
static inline void filemap_invalidate_unlock(struct address_space *mapping)
include/linux/fs.h
1088
up_write(&mapping->invalidate_lock);
include/linux/fs.h
1091
static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
include/linux/fs.h
1093
down_read(&mapping->invalidate_lock);
include/linux/fs.h
1097
struct address_space *mapping)
include/linux/fs.h
1099
return down_read_trylock(&mapping->invalidate_lock);
include/linux/fs.h
1103
struct address_space *mapping)
include/linux/fs.h
1105
up_read(&mapping->invalidate_lock);
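The filemap_invalidate_lock() family above is what callers such as the udf_page_mkwrite and ntfs3 fallocate entries take before shooting down page cache. A minimal hole-punch-style sketch of the pairing, assuming illustrative start/end offsets; punch_hole_cache is hypothetical.

	#include <linux/fs.h>
	#include <linux/mm.h>

	static void punch_hole_cache(struct inode *inode, loff_t start,
				     loff_t end)
	{
		struct address_space *mapping = inode->i_mapping;

		/* exclude concurrent faults and reads from re-instantiating
		 * pages while the range is being invalidated */
		filemap_invalidate_lock(mapping);
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
		filemap_invalidate_unlock(mapping);
	}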
include/linux/fs.h
2611
int filemap_flush_range(struct address_space *mapping, loff_t start,
include/linux/fs.h
2646
struct address_space *mapping = iocb->ki_filp->f_mapping;
include/linux/fs.h
2648
filemap_flush_range(mapping, iocb->ki_pos - count,
include/linux/fs.h
2926
extern void address_space_init_once(struct address_space *mapping);
include/linux/fs.h
3074
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
include/linux/fs.h
3251
struct address_space *mapping,
include/linux/fs.h
414
int (*write_begin)(const struct kiocb *, struct address_space *mapping,
include/linux/fs.h
417
int (*write_end)(const struct kiocb *, struct address_space *mapping,
include/linux/fs.h
505
static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
include/linux/fs.h
507
return xa_marked(&mapping->i_pages, tag);
include/linux/fs.h
510
static inline void i_mmap_lock_write(struct address_space *mapping)
include/linux/fs.h
512
down_write(&mapping->i_mmap_rwsem);
include/linux/fs.h
515
static inline int i_mmap_trylock_write(struct address_space *mapping)
include/linux/fs.h
517
return down_write_trylock(&mapping->i_mmap_rwsem);
include/linux/fs.h
520
static inline void i_mmap_unlock_write(struct address_space *mapping)
include/linux/fs.h
522
up_write(&mapping->i_mmap_rwsem);
include/linux/fs.h
525
static inline int i_mmap_trylock_read(struct address_space *mapping)
include/linux/fs.h
527
return down_read_trylock(&mapping->i_mmap_rwsem);
include/linux/fs.h
530
static inline void i_mmap_lock_read(struct address_space *mapping)
include/linux/fs.h
532
down_read(&mapping->i_mmap_rwsem);
include/linux/fs.h
535
static inline void i_mmap_unlock_read(struct address_space *mapping)
include/linux/fs.h
537
up_read(&mapping->i_mmap_rwsem);
include/linux/fs.h
540
static inline void i_mmap_assert_locked(struct address_space *mapping)
include/linux/fs.h
542
lockdep_assert_held(&mapping->i_mmap_rwsem);
include/linux/fs.h
545
static inline void i_mmap_assert_write_locked(struct address_space *mapping)
include/linux/fs.h
547
lockdep_assert_held_write(&mapping->i_mmap_rwsem);
include/linux/fs.h
553
static inline int mapping_mapped(const struct address_space *mapping)
include/linux/fs.h
555
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
include/linux/fs.h
567
static inline int mapping_writably_mapped(const struct address_space *mapping)
include/linux/fs.h
569
return atomic_read(&mapping->i_mmap_writable) > 0;
include/linux/fs.h
572
static inline int mapping_map_writable(struct address_space *mapping)
include/linux/fs.h
574
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
include/linux/fs.h
578
static inline void mapping_unmap_writable(struct address_space *mapping)
include/linux/fs.h
580
atomic_dec(&mapping->i_mmap_writable);
include/linux/fs.h
583
static inline int mapping_deny_writable(struct address_space *mapping)
include/linux/fs.h
585
return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
include/linux/fs.h
589
static inline void mapping_allow_writable(struct address_space *mapping)
include/linux/fs.h
591
atomic_inc(&mapping->i_mmap_writable);
include/linux/fscache.h
176
struct address_space *mapping,
include/linux/fscache.h
583
static inline void fscache_clear_page_bits(struct address_space *mapping,
include/linux/fscache.h
588
__fscache_clear_page_bits(mapping, start, len);
include/linux/fscache.h
617
struct address_space *mapping,
include/linux/fscache.h
624
__fscache_write_to_cache(cookie, mapping, start, len, i_size,
include/linux/fscrypt.h
361
return page->mapping == NULL;
include/linux/fscrypt.h
371
return folio->mapping == NULL;
include/linux/hugetlb.h
1097
struct address_space *mapping, pgoff_t idx)
include/linux/hugetlb.h
162
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
include/linux/hugetlb.h
715
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
include/linux/hugetlb.h
820
struct address_space *mapping, pgoff_t idx)
include/linux/hugetlb.h
822
return filemap_lock_folio(mapping, idx << huge_page_order(h));
include/linux/io-mapping.h
107
io_mapping_map_wc(struct io_mapping *mapping,
include/linux/io-mapping.h
113
BUG_ON(offset >= mapping->size);
include/linux/io-mapping.h
114
phys_addr = mapping->base + offset;
include/linux/io-mapping.h
147
io_mapping_fini(struct io_mapping *mapping)
include/linux/io-mapping.h
149
iounmap(mapping->iomem);
include/linux/io-mapping.h
154
io_mapping_map_wc(struct io_mapping *mapping,
include/linux/io-mapping.h
158
return mapping->iomem + offset;
include/linux/io-mapping.h
168
io_mapping_map_atomic_wc(struct io_mapping *mapping,
include/linux/io-mapping.h
176
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
include/linux/io-mapping.h
191
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
include/linux/io-mapping.h
193
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
include/linux/io-mapping.h
58
io_mapping_fini(struct io_mapping *mapping)
include/linux/io-mapping.h
60
iomap_free(mapping->base, mapping->size);
include/linux/io-mapping.h
65
io_mapping_map_atomic_wc(struct io_mapping *mapping,
include/linux/io-mapping.h
70
BUG_ON(offset >= mapping->size);
include/linux/io-mapping.h
71
phys_addr = mapping->base + offset;
include/linux/io-mapping.h
77
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
include/linux/io-mapping.h
92
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
include/linux/io-mapping.h
96
BUG_ON(offset >= mapping->size);
include/linux/io-mapping.h
97
phys_addr = mapping->base + offset;
include/linux/io-mapping.h
98
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
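In the io-mapping.h entries above, "mapping" is a struct io_mapping (a write-combined view of an I/O BAR region), not an address_space. A minimal sketch of the local-mapping pattern those helpers implement; copy_to_bar, iomap and offset are illustrative, and len is assumed to stay within the single mapped page.

	#include <linux/io-mapping.h>
	#include <linux/io.h>

	static void copy_to_bar(struct io_mapping *iomap, unsigned long offset,
				const void *src, size_t len)
	{
		/* map one page of the region write-combined for this context */
		void __iomem *vaddr = io_mapping_map_local_wc(iomap, offset);

		memcpy_toio(vaddr, src, len);	/* len must fit in the page */
		io_mapping_unmap_local(vaddr);
	}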
include/linux/iomap.h
355
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
include/linux/iomap.h
381
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
include/linux/jbd2.h
1698
struct address_space *mapping = journal->j_fs_dev->bd_mapping;
include/linux/jbd2.h
1704
errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
include/linux/jbd2.h
1709
struct address_space *mapping = journal->j_fs_dev->bd_mapping;
include/linux/jbd2.h
1711
return errseq_check(&mapping->wb_err,
include/linux/libnvdimm.h
131
struct nd_mapping_desc *mapping;
include/linux/memory-failure.h
9
struct address_space *mapping;
include/linux/memremap.h
254
new_folio->page.mapping =
include/linux/memremap.h
255
original_folio->page.mapping;
include/linux/migrate.h
57
int migrate_folio(struct address_space *mapping, struct folio *dst,
include/linux/migrate.h
66
int migrate_huge_page_move_mapping(struct address_space *mapping,
include/linux/migrate.h
71
int folio_migrate_mapping(struct address_space *mapping,
include/linux/migrate.h
90
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
include/linux/mlx5/cq.h
122
} mapping;
include/linux/mm.h
2685
ref_count += !!folio->mapping << order;
include/linux/mm.h
2890
int generic_error_remove_folio(struct address_space *mapping,
include/linux/mm.h
2903
void unmap_mapping_pages(struct address_space *mapping,
include/linux/mm.h
2905
void unmap_mapping_range(struct address_space *mapping,
include/linux/mm.h
2923
static inline void unmap_mapping_pages(struct address_space *mapping,
include/linux/mm.h
2925
static inline void unmap_mapping_range(struct address_space *mapping,
include/linux/mm.h
2929
static inline void unmap_shared_mapping_range(struct address_space *mapping,
include/linux/mm.h
2932
unmap_mapping_range(mapping, holebegin, holelen, 0);
include/linux/mm.h
3921
void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
include/linux/mm.h
3922
void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
include/linux/mm.h
3924
void truncate_inode_pages_final(struct address_space *mapping);
include/linux/mm.h
4593
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
include/linux/mm.h
4732
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
include/linux/mm.h
4739
unsigned long wp_shared_mapping_range(struct address_space *mapping,
include/linux/mm_types.h
103
struct address_space *mapping;
include/linux/mm_types.h
421
struct address_space *mapping;
include/linux/mm_types.h
512
FOLIO_MATCH(mapping, mapping);
include/linux/mm_types.h
614
TABLE_MATCH(mapping, __page_mapping);
include/linux/mpage.h
20
int __mpage_writepages(struct address_space *mapping,
include/linux/mpage.h
24
static inline int mpage_writepages(struct address_space *mapping,
include/linux/mpage.h
27
return __mpage_writepages(mapping, wbc, get_block, NULL);
include/linux/netfs.h
228
struct address_space *mapping; /* The mapping being accessed */
include/linux/netfs.h
409
int netfs_writeback_single(struct address_space *mapping,
include/linux/netfs.h
420
int netfs_writepages(struct address_space *mapping,
include/linux/netfs.h
422
bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
include/linux/netfs.h
460
int netfs_alloc_folioq_buffer(struct address_space *mapping,
include/linux/nfs_fs.h
438
extern int nfs_sync_mapping(struct address_space *mapping);
include/linux/nfs_fs.h
439
extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
include/linux/nfs_fs.h
459
extern int nfs_clear_invalid_mapping(struct address_space *mapping);
include/linux/nfs_fs.h
461
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
include/linux/nfs_page.h
207
return req->wb_page->mapping->host;
include/linux/nfs_page.h
208
return folio->mapping->host;
include/linux/page-flags.h
724
return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
include/linux/page-flags.h
729
unsigned long flags = (unsigned long)page_folio(page)->mapping;
include/linux/page-flags.h
747
return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
include/linux/pagemap.h
1005
extern struct page * read_cache_page_gfp(struct address_space *mapping,
include/linux/pagemap.h
1008
static inline struct page *read_mapping_page(struct address_space *mapping,
include/linux/pagemap.h
1011
return read_cache_page(mapping, index, NULL, file);
include/linux/pagemap.h
1014
static inline struct folio *read_mapping_folio(struct address_space *mapping,
include/linux/pagemap.h
1017
return read_cache_folio(mapping, index, NULL, file);
include/linux/pagemap.h
102
return errseq_check(&mapping->wb_err, since);
include/linux/pagemap.h
112
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
include/linux/pagemap.h
114
return errseq_sample(&mapping->wb_err);
include/linux/pagemap.h
1265
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
include/linux/pagemap.h
1268
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
include/linux/pagemap.h
1285
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
include/linux/pagemap.h
1287
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
include/linux/pagemap.h
1292
void delete_from_page_cache_batch(struct address_space *mapping,
include/linux/pagemap.h
1299
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
include/linux/pagemap.h
1302
bool filemap_range_has_writeback(struct address_space *mapping,
include/linux/pagemap.h
1319
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
include/linux/pagemap.h
1323
if (!mapping->nrpages)
include/linux/pagemap.h
1325
if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
include/linux/pagemap.h
1326
!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
include/linux/pagemap.h
1328
return filemap_range_has_writeback(mapping, start_byte, end_byte);
include/linux/pagemap.h
1349
struct address_space *mapping;
include/linux/pagemap.h
1363
.mapping = m, \
include/linux/pagemap.h
1392
void page_cache_sync_readahead(struct address_space *mapping,
include/linux/pagemap.h
1396
DEFINE_READAHEAD(ractl, file, ra, mapping, index);
include/linux/pagemap.h
140
static inline bool mapping_empty(const struct address_space *mapping)
include/linux/pagemap.h
1414
void page_cache_async_readahead(struct address_space *mapping,
include/linux/pagemap.h
1418
DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
include/linux/pagemap.h
142
return xa_empty(&mapping->i_pages);
include/linux/pagemap.h
1435
folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
include/linux/pagemap.h
1463
XA_STATE(xas, &rac->mapping->i_pages, 0);
include/linux/pagemap.h
1553
if (!folio->mapping)
include/linux/pagemap.h
166
static inline bool mapping_shrinkable(const struct address_space *mapping)
include/linux/pagemap.h
179
head = rcu_access_pointer(mapping->i_pages.xa_head);
include/linux/pagemap.h
21
unsigned long invalidate_mapping_pages(struct address_space *mapping,
include/linux/pagemap.h
238
static inline void mapping_set_error(struct address_space *mapping, int error)
include/linux/pagemap.h
244
__filemap_set_wb_err(mapping, error);
include/linux/pagemap.h
247
if (mapping->host)
include/linux/pagemap.h
248
errseq_set(&mapping->host->i_sb->s_wb_err, error);
include/linux/pagemap.h
252
set_bit(AS_ENOSPC, &mapping->flags);
include/linux/pagemap.h
254
set_bit(AS_EIO, &mapping->flags);
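mapping_set_error() above records writeback failures both in the AS_EIO/AS_ENOSPC bits and in the errseq_t stream that filemap_sample_wb_err()/filemap_check_wb_err() consume. A minimal fsync-style sketch of the sample-then-check pattern; my_fsync_errors is hypothetical, and real implementations usually go through file_write_and_wait_range() on the struct file instead.

	#include <linux/pagemap.h>

	static int my_fsync_errors(struct file *file)
	{
		struct address_space *mapping = file->f_mapping;
		errseq_t since = filemap_sample_wb_err(mapping);
		int err = filemap_write_and_wait(mapping);

		/* report any error raised since the sample, even one that
		 * another thread has already consumed */
		if (!err)
			err = filemap_check_wb_err(mapping, since);
		return err;
	}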
include/linux/pagemap.h
257
static inline void mapping_set_unevictable(struct address_space *mapping)
include/linux/pagemap.h
259
set_bit(AS_UNEVICTABLE, &mapping->flags);
include/linux/pagemap.h
262
static inline void mapping_clear_unevictable(struct address_space *mapping)
include/linux/pagemap.h
264
clear_bit(AS_UNEVICTABLE, &mapping->flags);
include/linux/pagemap.h
267
static inline bool mapping_unevictable(const struct address_space *mapping)
include/linux/pagemap.h
269
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
include/linux/pagemap.h
272
static inline void mapping_set_exiting(struct address_space *mapping)
include/linux/pagemap.h
274
set_bit(AS_EXITING, &mapping->flags);
include/linux/pagemap.h
277
static inline int mapping_exiting(const struct address_space *mapping)
include/linux/pagemap.h
279
return test_bit(AS_EXITING, &mapping->flags);
include/linux/pagemap.h
282
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
include/linux/pagemap.h
284
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
include/linux/pagemap.h
287
static inline int mapping_use_writeback_tags(const struct address_space *mapping)
include/linux/pagemap.h
289
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
include/linux/pagemap.h
292
static inline bool mapping_release_always(const struct address_space *mapping)
include/linux/pagemap.h
294
return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
include/linux/pagemap.h
297
static inline void mapping_set_release_always(struct address_space *mapping)
include/linux/pagemap.h
299
set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
include/linux/pagemap.h
30
int invalidate_inode_pages2(struct address_space *mapping);
include/linux/pagemap.h
302
static inline void mapping_clear_release_always(struct address_space *mapping)
include/linux/pagemap.h
304
clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
include/linux/pagemap.h
307
static inline bool mapping_stable_writes(const struct address_space *mapping)
include/linux/pagemap.h
309
return test_bit(AS_STABLE_WRITES, &mapping->flags);
include/linux/pagemap.h
31
int invalidate_inode_pages2_range(struct address_space *mapping,
include/linux/pagemap.h
312
static inline void mapping_set_stable_writes(struct address_space *mapping)
include/linux/pagemap.h
314
set_bit(AS_STABLE_WRITES, &mapping->flags);
include/linux/pagemap.h
317
static inline void mapping_clear_stable_writes(struct address_space *mapping)
include/linux/pagemap.h
319
clear_bit(AS_STABLE_WRITES, &mapping->flags);
include/linux/pagemap.h
322
static inline void mapping_set_inaccessible(struct address_space *mapping)
include/linux/pagemap.h
329
set_bit(AS_UNEVICTABLE, &mapping->flags);
include/linux/pagemap.h
330
set_bit(AS_INACCESSIBLE, &mapping->flags);
include/linux/pagemap.h
333
static inline bool mapping_inaccessible(const struct address_space *mapping)
include/linux/pagemap.h
335
return test_bit(AS_INACCESSIBLE, &mapping->flags);
include/linux/pagemap.h
338
static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
include/linux/pagemap.h
340
set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
include/linux/pagemap.h
343
static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
include/linux/pagemap.h
345
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
include/linux/pagemap.h
348
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
include/linux/pagemap.h
35
int filemap_invalidate_pages(struct address_space *mapping,
include/linux/pagemap.h
350
return mapping->gfp_mask;
include/linux/pagemap.h
354
static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
include/linux/pagemap.h
357
return mapping_gfp_mask(mapping) & gfp_mask;
include/linux/pagemap.h
41
int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
include/linux/pagemap.h
418
static inline void mapping_set_folio_order_range(struct address_space *mapping,
include/linux/pagemap.h
42
int filemap_fdatawait_keep_errors(struct address_space *mapping);
include/linux/pagemap.h
434
mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
include/linux/pagemap.h
438
static inline void mapping_set_folio_min_order(struct address_space *mapping,
include/linux/pagemap.h
44
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
include/linux/pagemap.h
441
mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
include/linux/pagemap.h
455
static inline void mapping_set_large_folios(struct address_space *mapping)
include/linux/pagemap.h
457
mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
include/linux/pagemap.h
461
mapping_max_folio_order(const struct address_space *mapping)
include/linux/pagemap.h
465
return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
include/linux/pagemap.h
469
mapping_min_folio_order(const struct address_space *mapping)
include/linux/pagemap.h
473
return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
include/linux/pagemap.h
477
mapping_min_folio_nrpages(const struct address_space *mapping)
include/linux/pagemap.h
479
return 1UL << mapping_min_folio_order(mapping);
include/linux/pagemap.h
483
mapping_min_folio_nrbytes(const struct address_space *mapping)
include/linux/pagemap.h
485
return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
include/linux/pagemap.h
49
static inline int filemap_fdatawait(struct address_space *mapping)
include/linux/pagemap.h
497
static inline pgoff_t mapping_align_index(const struct address_space *mapping,
include/linux/pagemap.h
500
return round_down(index, mapping_min_folio_nrpages(mapping));
include/linux/pagemap.h
507
static inline bool mapping_large_folio_support(const struct address_space *mapping)
include/linux/pagemap.h
51
return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
include/linux/pagemap.h
510
VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
include/linux/pagemap.h
513
return mapping_max_folio_order(mapping) > 0;
include/linux/pagemap.h
517
static inline size_t mapping_max_folio_size(const struct address_space *mapping)
include/linux/pagemap.h
519
return PAGE_SIZE << mapping_max_folio_order(mapping);
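
The mapping_set_folio_min_order()/mapping_max_folio_order() hits above are the folio-order bounds packed into mapping->flags. A minimal sketch of how a filesystem whose block size exceeds PAGE_SIZE might pin the minimum order at inode-init time; my_fs_init_mapping is a hypothetical helper, not kernel code:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical helper: make the page cache allocate folios of at
 * least the filesystem block size for this inode. */
static void my_fs_init_mapping(struct inode *inode, unsigned int blkbits)
{
        unsigned int min_order = 0;

        if (blkbits > PAGE_SHIFT)
                min_order = blkbits - PAGE_SHIFT;

        /* Per the hit at pagemap.h:441, this sets the range
         * min_order..MAX_PAGECACHE_ORDER. */
        mapping_set_folio_min_order(inode->i_mapping, min_order);
}
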
include/linux/pagemap.h
522
static inline int filemap_nr_thps(const struct address_space *mapping)
include/linux/pagemap.h
525
return atomic_read(&mapping->nr_thps);
include/linux/pagemap.h
531
static inline void filemap_nr_thps_inc(struct address_space *mapping)
include/linux/pagemap.h
534
if (!mapping_large_folio_support(mapping))
include/linux/pagemap.h
535
atomic_inc(&mapping->nr_thps);
include/linux/pagemap.h
537
WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
include/linux/pagemap.h
541
static inline void filemap_nr_thps_dec(struct address_space *mapping)
include/linux/pagemap.h
544
if (!mapping_large_folio_support(mapping))
include/linux/pagemap.h
545
atomic_dec(&mapping->nr_thps);
include/linux/pagemap.h
547
WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
include/linux/pagemap.h
55
int filemap_write_and_wait_range(struct address_space *mapping,
include/linux/pagemap.h
57
int filemap_fdatawrite_range(struct address_space *mapping,
include/linux/pagemap.h
584
return folio->mapping->host;
include/linux/pagemap.h
59
int filemap_check_errors(struct address_space *mapping);
include/linux/pagemap.h
60
void __filemap_set_wb_err(struct address_space *mapping, int err);
include/linux/pagemap.h
63
static inline int filemap_write_and_wait(struct address_space *mapping)
include/linux/pagemap.h
65
return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
include/linux/pagemap.h
679
pgoff_t page_cache_next_miss(struct address_space *mapping,
include/linux/pagemap.h
681
pgoff_t page_cache_prev_miss(struct address_space *mapping,
include/linux/pagemap.h
754
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
include/linux/pagemap.h
755
struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
include/linux/pagemap.h
757
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
include/linux/pagemap.h
760
static inline struct folio *__filemap_get_folio(struct address_space *mapping,
include/linux/pagemap.h
763
return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
include/linux/pagemap.h
780
struct address_space *mapping, pgoff_t index, size_t len)
include/linux/pagemap.h
789
return __filemap_get_folio(mapping, index, fgp_flags,
include/linux/pagemap.h
790
mapping_gfp_mask(mapping));
include/linux/pagemap.h
804
static inline struct folio *filemap_get_folio(struct address_space *mapping,
include/linux/pagemap.h
807
return __filemap_get_folio(mapping, index, 0, 0);
include/linux/pagemap.h
82
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
include/linux/pagemap.h
822
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
include/linux/pagemap.h
825
return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
include/linux/pagemap.h
840
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
include/linux/pagemap.h
843
return __filemap_get_folio(mapping, index,
include/linux/pagemap.h
845
mapping_gfp_mask(mapping));
include/linux/pagemap.h
858
static inline struct page *find_get_page(struct address_space *mapping,
include/linux/pagemap.h
86
__filemap_set_wb_err(mapping, err);
include/linux/pagemap.h
861
return pagecache_get_page(mapping, offset, 0, 0);
include/linux/pagemap.h
864
static inline struct page *find_get_page_flags(struct address_space *mapping,
include/linux/pagemap.h
867
return pagecache_get_page(mapping, offset, fgp_flags, 0);
include/linux/pagemap.h
883
static inline struct page *find_lock_page(struct address_space *mapping,
include/linux/pagemap.h
886
return pagecache_get_page(mapping, index, FGP_LOCK, 0);
include/linux/pagemap.h
908
static inline struct page *find_or_create_page(struct address_space *mapping,
include/linux/pagemap.h
911
return pagecache_get_page(mapping, index,
include/linux/pagemap.h
930
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
include/linux/pagemap.h
933
return pagecache_get_page(mapping, index,
include/linux/pagemap.h
935
mapping_gfp_mask(mapping));
include/linux/pagemap.h
99
static inline int filemap_check_wb_err(struct address_space *mapping,
include/linux/pagemap.h
990
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
include/linux/pagemap.h
992
unsigned filemap_get_folios_contig(struct address_space *mapping,
include/linux/pagemap.h
994
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
include/linux/pagemap.h
996
unsigned filemap_get_folios_dirty(struct address_space *mapping,
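
Taken together, the filemap_get_folio()/filemap_lock_folio()/filemap_grab_folio() hits above are thin wrappers over __filemap_get_folio() with different FGP flags. A sketch of a find-or-create lookup, assuming the ERR_PTR()-on-failure convention those wrappers imply:

#include <linux/pagemap.h>

static struct folio *get_or_create_locked(struct address_space *mapping,
                                          pgoff_t index)
{
        struct folio *folio;

        /* FGP_CREAT allocates on a miss; FGP_LOCK returns it locked. */
        folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
                                    mapping_gfp_mask(mapping));
        if (IS_ERR(folio))
                return folio;   /* e.g. ERR_PTR(-ENOMEM) */

        /* Caller must folio_unlock() and folio_put() when done. */
        return folio;
}
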
include/linux/pagewalk.h
145
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
include/linux/rmap.h
392
unsigned long mapping = (unsigned long)folio->mapping;
include/linux/rmap.h
395
anon_vma = (void *)(mapping - FOLIO_MAPPING_ANON);
include/linux/rmap.h
931
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
include/linux/secretmem.h
11
return mapping->a_ops == &secretmem_aops;
include/linux/secretmem.h
24
static inline bool secretmem_mapping(struct address_space *mapping)
include/linux/secretmem.h
9
static inline bool secretmem_mapping(struct address_space *mapping)
include/linux/sh_clk.h
63
struct clk_mapping *mapping;
include/linux/shmem_fs.h
115
bool shmem_mapping(const struct address_space *mapping);
include/linux/shmem_fs.h
117
static inline bool shmem_mapping(const struct address_space *mapping)
include/linux/shmem_fs.h
122
void shmem_unlock_mapping(struct address_space *mapping);
include/linux/shmem_fs.h
123
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
include/linux/shmem_fs.h
162
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
include/linux/shmem_fs.h
176
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
include/linux/shmem_fs.h
179
static inline struct folio *shmem_read_folio(struct address_space *mapping,
include/linux/shmem_fs.h
182
return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
include/linux/shmem_fs.h
186
struct address_space *mapping, pgoff_t index)
include/linux/shmem_fs.h
188
return shmem_read_mapping_page_gfp(mapping, index,
include/linux/shmem_fs.h
189
mapping_gfp_mask(mapping));
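
The shmem_read_folio()/shmem_read_mapping_page() wrappers above just plug mapping_gfp_mask() into the _gfp variants. A sketch of reading one byte out of a tmpfs-backed mapping, assuming the usual ERR_PTR convention and an uptodate, unlocked folio on success:

#include <linux/shmem_fs.h>
#include <linux/highmem.h>

static int read_byte_from_shmem(struct address_space *mapping,
                                pgoff_t index, u8 *out)
{
        struct folio *folio = shmem_read_folio(mapping, index);
        u8 *kaddr;

        if (IS_ERR(folio))
                return PTR_ERR(folio);

        kaddr = kmap_local_folio(folio, 0);
        *out = *kaddr;
        kunmap_local(kaddr);
        folio_put(folio);
        return 0;
}
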
include/linux/swap.h
394
long remove_mapping(struct address_space *mapping, struct folio *folio);
include/linux/tpm_eventlog.h
166
void *mapping = NULL;
include/linux/tpm_eventlog.h
186
mapping = TPM_MEMREMAP((unsigned long)marker_start,
include/linux/tpm_eventlog.h
188
if (!mapping) {
include/linux/tpm_eventlog.h
193
mapping = marker_start;
include/linux/tpm_eventlog.h
196
event = (struct tcg_pcr_event2_head *)mapping;
include/linux/tpm_eventlog.h
233
TPM_MEMUNMAP(mapping, mapping_size);
include/linux/tpm_eventlog.h
235
mapping = TPM_MEMREMAP((unsigned long)marker,
include/linux/tpm_eventlog.h
237
if (!mapping) {
include/linux/tpm_eventlog.h
242
mapping = marker;
include/linux/tpm_eventlog.h
245
memcpy(&halg, mapping, halg_size);
include/linux/tpm_eventlog.h
267
TPM_MEMUNMAP(mapping, mapping_size);
include/linux/tpm_eventlog.h
269
mapping = TPM_MEMREMAP((unsigned long)marker,
include/linux/tpm_eventlog.h
271
if (!mapping) {
include/linux/tpm_eventlog.h
276
mapping = marker;
include/linux/tpm_eventlog.h
279
event_field = (struct tcg_event_field *)mapping;
include/linux/tpm_eventlog.h
290
TPM_MEMUNMAP(mapping, mapping_size);
include/linux/uacce.h
101
struct address_space *mapping;
include/linux/writeback.h
352
void balance_dirty_pages_ratelimited(struct address_space *mapping);
include/linux/writeback.h
353
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
include/linux/writeback.h
358
struct folio *writeback_iter(struct address_space *mapping,
include/linux/writeback.h
361
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
include/linux/writeback.h
363
void tag_pages_for_writeback(struct address_space *mapping,
include/linux/writeback.h
366
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
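
writeback_iter() above is the iterator that ->writepages implementations drive: the caller passes back the previously returned folio (NULL to start) plus an error slot, and each folio comes back locked with its dirty bit cleared for I/O. A sketch of a writepages method built on it; write_one_folio() is hypothetical and must unlock the folio when done:

#include <linux/writeback.h>
#include <linux/pagemap.h>

static int my_fs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct folio *folio = NULL;
        int error = 0;

        /* Passing the last folio back in resumes the scan; the first
         * error is latched and returned when iteration ends. */
        while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                error = write_one_folio(folio, wbc);    /* hypothetical */

        return error;
}
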
include/misc/ocxl.h
131
struct address_space *mapping);
include/trace/events/ext4.h
663
__entry->dev = folio->mapping->host->i_sb->s_dev;
include/trace/events/ext4.h
664
__entry->ino = folio->mapping->host->i_ino;
include/trace/events/f2fs.h
1191
__entry->dev = folio->mapping->host->i_sb->s_dev;
include/trace/events/f2fs.h
1192
__entry->ino = folio->mapping->host->i_ino;
include/trace/events/f2fs.h
1219
TP_CONDITION(folio->mapping)
include/trace/events/f2fs.h
1228
TP_CONDITION(folio->mapping)
include/trace/events/f2fs.h
1376
__entry->dev = folio->mapping->host->i_sb->s_dev;
include/trace/events/f2fs.h
1377
__entry->ino = folio->mapping->host->i_ino;
include/trace/events/f2fs.h
1379
__entry->dir = S_ISDIR(folio->mapping->host->i_mode);
include/trace/events/filemap.h
102
TP_ARGS(mapping, index, last_index)
include/trace/events/filemap.h
107
struct address_space *mapping,
include/trace/events/filemap.h
111
TP_ARGS(mapping, index, last_index)
include/trace/events/filemap.h
115
TP_PROTO(struct address_space *mapping, pgoff_t index),
include/trace/events/filemap.h
117
TP_ARGS(mapping, index),
include/trace/events/filemap.h
126
__entry->i_ino = mapping->host->i_ino;
include/trace/events/filemap.h
127
if (mapping->host->i_sb)
include/trace/events/filemap.h
129
mapping->host->i_sb->s_dev;
include/trace/events/filemap.h
131
__entry->s_dev = mapping->host->i_rdev;
include/trace/events/filemap.h
144
TP_PROTO(struct address_space *mapping, errseq_t eseq),
include/trace/events/filemap.h
146
TP_ARGS(mapping, eseq),
include/trace/events/filemap.h
155
__entry->i_ino = mapping->host->i_ino;
include/trace/events/filemap.h
157
if (mapping->host->i_sb)
include/trace/events/filemap.h
158
__entry->s_dev = mapping->host->i_sb->s_dev;
include/trace/events/filemap.h
160
__entry->s_dev = mapping->host->i_rdev;
include/trace/events/filemap.h
32
__entry->i_ino = folio->mapping->host->i_ino;
include/trace/events/filemap.h
34
if (folio->mapping->host->i_sb)
include/trace/events/filemap.h
35
__entry->s_dev = folio->mapping->host->i_sb->s_dev;
include/trace/events/filemap.h
37
__entry->s_dev = folio->mapping->host->i_rdev;
include/trace/events/filemap.h
62
struct address_space *mapping,
include/trace/events/filemap.h
67
TP_ARGS(mapping, index, last_index),
include/trace/events/filemap.h
77
__entry->i_ino = mapping->host->i_ino;
include/trace/events/filemap.h
78
if (mapping->host->i_sb)
include/trace/events/filemap.h
80
mapping->host->i_sb->s_dev;
include/trace/events/filemap.h
82
__entry->s_dev = mapping->host->i_rdev;
include/trace/events/filemap.h
98
struct address_space *mapping,
include/trace/events/netfs.h
496
struct address_space *__m = READ_ONCE(folio->mapping);
include/trace/events/page_ref.h
24
__field(void *, mapping)
include/trace/events/page_ref.h
34
__entry->mapping = page->mapping;
include/trace/events/page_ref.h
43
__entry->mapcount, __entry->mapping, __entry->mt,
include/trace/events/page_ref.h
72
__field(void *, mapping)
include/trace/events/page_ref.h
83
__entry->mapping = page->mapping;
include/trace/events/page_ref.h
93
__entry->mapcount, __entry->mapping, __entry->mt,
include/trace/events/writeback.h
100
TP_ARGS(folio, mapping)
include/trace/events/writeback.h
289
struct address_space *mapping = folio_mapping(folio);
include/trace/events/writeback.h
290
struct inode *inode = mapping ? mapping->host : NULL;
include/trace/events/writeback.h
64
TP_PROTO(struct folio *folio, struct address_space *mapping),
include/trace/events/writeback.h
66
TP_ARGS(folio, mapping),
include/trace/events/writeback.h
76
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
include/trace/events/writeback.h
78
__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
include/trace/events/writeback.h
91
TP_PROTO(struct folio *folio, struct address_space *mapping),
include/trace/events/writeback.h
93
TP_ARGS(folio, mapping)
include/trace/events/writeback.h
98
TP_PROTO(struct folio *folio, struct address_space *mapping),
include/uapi/drm/tegra_drm.h
759
__u32 mapping;
include/uapi/drm/tegra_drm.h
775
__u32 mapping;
include/uapi/drm/tegra_drm.h
793
__u32 mapping;
ipc/shm.c
942
struct address_space *mapping = inode->i_mapping;
ipc/shm.c
944
*rss_add += pages_per_huge_page(h) * mapping->nrpages;
kernel/events/uprobes.c
1048
static int __copy_insn(struct address_space *mapping, struct file *filp,
kernel/events/uprobes.c
1057
if (mapping->a_ops->read_folio)
kernel/events/uprobes.c
1058
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
kernel/events/uprobes.c
1060
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
kernel/events/uprobes.c
1072
struct address_space *mapping = uprobe->inode->i_mapping;
kernel/events/uprobes.c
1084
err = __copy_insn(mapping, filp, insn, len, offs);
kernel/events/uprobes.c
1202
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
kernel/events/uprobes.c
1212
i_mmap_lock_read(mapping);
kernel/events/uprobes.c
1213
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
kernel/events/uprobes.c
1243
i_mmap_unlock_read(mapping);
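
build_map_info() above shows the recurring reverse-map walk: take i_mmap_lock_read() and visit every VMA overlapping a page offset via vma_interval_tree_foreach(). A generic sketch of the same pattern; visit_vma() is hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

/* Visit every VMA that maps page offset @pgoff of @mapping. */
static void for_each_mapping_vma(struct address_space *mapping, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff)
                visit_vma(vma, pgoff);  /* hypothetical per-VMA work */
        i_mmap_unlock_read(mapping);
}
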
kernel/futex/core.c
555
struct address_space *mapping;
kernel/futex/core.c
670
mapping = READ_ONCE(folio->mapping);
kernel/futex/core.c
687
if (unlikely(!mapping)) {
kernel/futex/core.c
696
shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
kernel/futex/core.c
746
if (READ_ONCE(folio->mapping) != mapping) {
kernel/futex/core.c
753
inode = READ_ONCE(mapping->host);
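
The futex hits above sample folio->mapping with READ_ONCE() and re-check it later; the many "folio->mapping != mapping" tests elsewhere in this listing do the same check under the folio lock, because truncation clears folio->mapping while holding that lock. A sketch of the locked form of the revalidation, generalized from those hits rather than copied from any one of them:

#include <linux/pagemap.h>

/* Returns true with @folio locked if it still belongs to @mapping;
 * on a lost race with truncate/reclaim the folio is unlocked again. */
static bool folio_still_attached(struct folio *folio,
                                 struct address_space *mapping)
{
        folio_lock(folio);
        if (folio->mapping == mapping)
                return true;
        folio_unlock(folio);
        return false;
}
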
kernel/kexec_core.c
289
pages->mapping = NULL;
kernel/vmcore_info.c
197
VMCOREINFO_OFFSET(page, mapping);
lib/test_hmm.c
1705
rfolio->mapping = NULL;
lib/test_hmm.c
1716
rpage_tail->mapping = NULL;
lib/test_hmm.c
1718
folio_page(tail, 0)->mapping = folio_page(head, 0)->mapping;
mm/compaction.c
1083
mapping = folio_mapping(folio);
mm/compaction.c
1084
if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
mm/compaction.c
1091
if (!(cc->gfp_mask & __GFP_FS) && mapping)
mm/compaction.c
1116
(mapping && is_unevictable)) {
mm/compaction.c
1141
mapping = folio_mapping(folio);
mm/compaction.c
1143
migrate_dirty = !mapping ||
mm/compaction.c
1144
mapping->a_ops->migrate_folio;
mm/compaction.c
1146
is_inaccessible = mapping && mapping_inaccessible(mapping);
mm/compaction.c
847
struct address_space *mapping;
mm/debug.c
105
else if (mapping)
mm/debug.c
106
dump_mapping(mapping);
mm/debug.c
73
struct address_space *mapping = folio_mapping(folio);
mm/debug.c
81
folio_ref_count(folio), mapcount, mapping,
mm/fadvise.c
106
force_page_cache_readahead(mapping, file, start_index, nrpages);
mm/fadvise.c
114
filemap_flush_range(mapping, offset, endbyte);
mm/fadvise.c
157
mapping_try_invalidate(mapping, start_index, end_index,
mm/fadvise.c
167
invalidate_mapping_pages(mapping, start_index,
mm/fadvise.c
34
struct address_space *mapping;
mm/fadvise.c
45
mapping = file->f_mapping;
mm/fadvise.c
46
if (!mapping || len < 0)
mm/fadvise.c
49
bdi = inode_to_bdi(mapping->host);
mm/filemap.c
129
static void page_cache_delete(struct address_space *mapping,
mm/filemap.c
132
XA_STATE(xas, &mapping->i_pages, folio->index);
mm/filemap.c
135
mapping_set_update(&xas, mapping);
mm/filemap.c
145
folio->mapping = NULL;
mm/filemap.c
147
mapping->nrpages -= nr;
mm/filemap.c
150
static void filemap_unaccount_folio(struct address_space *mapping,
mm/filemap.c
1605
struct address_space *mapping = folio->mapping;
mm/filemap.c
1613
if (mapping)
mm/filemap.c
1614
folio_unmap_invalidate(mapping, folio, 0);
mm/filemap.c
163
if (mapping_exiting(mapping) && !folio_test_large(folio)) {
mm/filemap.c
1806
pgoff_t page_cache_next_miss(struct address_space *mapping,
mm/filemap.c
1809
XA_STATE(xas, &mapping->i_pages, index);
mm/filemap.c
1843
pgoff_t page_cache_prev_miss(struct address_space *mapping,
mm/filemap.c
1846
XA_STATE(xas, &mapping->i_pages, index);
mm/filemap.c
1892
void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
mm/filemap.c
1894
XA_STATE(xas, &mapping->i_pages, index);
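
The XA_STATE(xas, &mapping->i_pages, index) lines recurring below set up an XArray cursor over the page cache; lockless lookups run under rcu_read_lock() and must retry on xas_retry() entries. A minimal sketch in the style of filemap_get_entry(), without the reference-taking that makes the real lookup safe to use:

#include <linux/xarray.h>
#include <linux/pagemap.h>

/* RCU peek at the page cache; the entry may be a folio or a shadow
 * value entry, and is only a hint unless the caller stabilizes it. */
static void *peek_page_cache(struct address_space *mapping, pgoff_t index)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *entry;

        rcu_read_lock();
        do {
                entry = xas_load(&xas);
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        return entry;
}
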
mm/filemap.c
192
filemap_nr_thps_dec(mapping);
mm/filemap.c
194
if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
mm/filemap.c
1940
struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
mm/filemap.c
1946
folio = filemap_get_entry(mapping, index);
mm/filemap.c
1963
if (unlikely(folio->mapping != mapping)) {
mm/filemap.c
1983
unsigned int min_order = mapping_min_folio_order(mapping);
mm/filemap.c
1986
index = mapping_align_index(mapping, index);
mm/filemap.c
1988
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
mm/filemap.c
1999
if (order > mapping_max_folio_order(mapping))
mm/filemap.c
2000
order = mapping_max_folio_order(mapping);
mm/filemap.c
2021
err = filemap_add_folio(mapping, folio, index, gfp);
mm/filemap.c
2115
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
mm/filemap.c
2118
XA_STATE(xas, &mapping->i_pages, *start);
mm/filemap.c
213
mapping_can_writeback(mapping)))
mm/filemap.c
2136
nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
mm/filemap.c
214
folio_account_cleaned(folio, inode_to_wb(mapping->host));
mm/filemap.c
2164
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
mm/filemap.c
2167
XA_STATE(xas, &mapping->i_pages, *start);
mm/filemap.c
2186
if (folio->mapping != mapping ||
mm/filemap.c
2232
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
mm/filemap.c
2235
return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
mm/filemap.c
224
struct address_space *mapping = folio->mapping;
mm/filemap.c
2254
unsigned filemap_get_folios_contig(struct address_space *mapping,
mm/filemap.c
2257
XA_STATE(xas, &mapping->i_pages, *start);
mm/filemap.c
227
filemap_unaccount_folio(mapping, folio);
mm/filemap.c
228
page_cache_delete(mapping, folio, shadow);
mm/filemap.c
231
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
mm/filemap.c
2330
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
mm/filemap.c
2333
XA_STATE(xas, &mapping->i_pages, *start);
mm/filemap.c
235
free_folio = mapping->a_ops->free_folio;
mm/filemap.c
2385
unsigned filemap_get_folios_dirty(struct address_space *mapping, pgoff_t *start,
mm/filemap.c
2388
XA_STATE(xas, &mapping->i_pages, *start);
mm/filemap.c
2455
static void filemap_get_read_batch(struct address_space *mapping,
mm/filemap.c
2458
XA_STATE(xas, &mapping->i_pages, index);
mm/filemap.c
2517
static bool filemap_range_uptodate(struct address_space *mapping,
mm/filemap.c
252
struct address_space *mapping = folio->mapping;
mm/filemap.c
2526
if (!mapping->a_ops->is_partially_uptodate)
mm/filemap.c
2528
if (mapping->host->i_blkbits >= folio_shift(folio))
mm/filemap.c
2541
return mapping->a_ops->is_partially_uptodate(folio, pos, count);
mm/filemap.c
2545
struct address_space *mapping, size_t count,
mm/filemap.c
255
spin_lock(&mapping->host->i_lock);
mm/filemap.c
2551
if (!filemap_invalidate_trylock_shared(mapping))
mm/filemap.c
2554
filemap_invalidate_lock_shared(mapping);
mm/filemap.c
256
xa_lock_irq(&mapping->i_pages);
mm/filemap.c
2562
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
2576
if (!folio->mapping)
mm/filemap.c
258
xa_unlock_irq(&mapping->i_pages);
mm/filemap.c
2580
if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
mm/filemap.c
2588
error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
mm/filemap.c
259
if (mapping_shrinkable(mapping))
mm/filemap.c
2594
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
260
inode_lru_list_add(mapping->host);
mm/filemap.c
2602
struct address_space *mapping = iocb->ki_filp->f_mapping;
mm/filemap.c
2605
unsigned int min_order = mapping_min_folio_order(mapping);
mm/filemap.c
261
spin_unlock(&mapping->host->i_lock);
mm/filemap.c
2611
folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);
mm/filemap.c
263
filemap_free_folio(mapping, folio);
mm/filemap.c
2630
filemap_invalidate_lock_shared(mapping);
mm/filemap.c
2632
error = filemap_add_folio(mapping, folio, index,
mm/filemap.c
2633
mapping_gfp_constraint(mapping, GFP_KERNEL));
mm/filemap.c
2639
error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
mm/filemap.c
2644
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
2648
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
2654
struct address_space *mapping, struct folio *folio,
mm/filemap.c
2657
DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
mm/filemap.c
2671
struct address_space *mapping = filp->f_mapping;
mm/filemap.c
2680
mapping_min_folio_nrbytes(mapping)) >> PAGE_SHIFT;
mm/filemap.c
2685
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
mm/filemap.c
2687
DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index);
mm/filemap.c
2698
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
mm/filemap.c
2709
err = filemap_readahead(iocb, filp, mapping, folio, last_index);
mm/filemap.c
2718
err = filemap_update_page(iocb, mapping, count, folio,
mm/filemap.c
2724
trace_mm_filemap_get_pages(mapping, index, last_index - 1);
mm/filemap.c
2773
struct address_space *mapping = filp->f_mapping;
mm/filemap.c
2774
struct inode *inode = mapping->host;
mm/filemap.c
279
static void page_cache_delete_batch(struct address_space *mapping,
mm/filemap.c
282
XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
mm/filemap.c
2826
writably_mapped = mapping_writably_mapped(mapping);
mm/filemap.c
287
mapping_set_update(&xas, mapping);
mm/filemap.c
2885
struct address_space *mapping = iocb->ki_filp->f_mapping;
mm/filemap.c
2890
if (filemap_range_needs_writeback(mapping, pos, end))
mm/filemap.c
2895
return filemap_write_and_wait_range(mapping, pos, end);
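
filemap_write_and_wait_range() above is the standard "flush dirty cache, then wait" step before direct I/O, and the same call is the core of a simple fsync. A deliberately minimal sketch of an ->fsync method using it (real implementations also sync inode metadata):

#include <linux/fs.h>
#include <linux/pagemap.h>

static int my_fs_fsync(struct file *file, loff_t start, loff_t end,
                       int datasync)
{
        /* Write back and wait on the byte range; returns the first
         * writeback error seen for the mapping. */
        return filemap_write_and_wait_range(file->f_mapping, start, end);
}
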
mm/filemap.c
2899
int filemap_invalidate_pages(struct address_space *mapping,
mm/filemap.c
2906
if (filemap_range_has_page(mapping, pos, end))
mm/filemap.c
2909
ret = filemap_write_and_wait_range(mapping, pos, end);
mm/filemap.c
2920
return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
mm/filemap.c
2926
struct address_space *mapping = iocb->ki_filp->f_mapping;
mm/filemap.c
2928
return filemap_invalidate_pages(mapping, iocb->ki_pos,
mm/filemap.c
2966
struct address_space *mapping = file->f_mapping;
mm/filemap.c
2967
struct inode *inode = mapping->host;
mm/filemap.c
2974
retval = mapping->a_ops->direct_IO(iocb, iter);
mm/filemap.c
310
folio->mapping = NULL;
mm/filemap.c
3147
struct address_space *mapping, struct folio *folio,
mm/filemap.c
3150
const struct address_space_operations *ops = mapping->a_ops;
mm/filemap.c
3151
size_t offset, bsz = i_blocksize(mapping->host);
mm/filemap.c
3161
if (unlikely(folio->mapping != mapping))
mm/filemap.c
317
mapping->nrpages -= total_pages;
mm/filemap.c
320
void delete_from_page_cache_batch(struct address_space *mapping,
mm/filemap.c
3204
loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
mm/filemap.c
3207
XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
mm/filemap.c
3228
start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
mm/filemap.c
328
spin_lock(&mapping->host->i_lock);
mm/filemap.c
329
xa_lock_irq(&mapping->i_pages);
mm/filemap.c
3309
struct address_space *mapping = file->f_mapping;
mm/filemap.c
3310
DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
mm/filemap.c
334
filemap_unaccount_folio(mapping, folio);
mm/filemap.c
336
page_cache_delete_batch(mapping, fbatch);
mm/filemap.c
337
xa_unlock_irq(&mapping->i_pages);
mm/filemap.c
338
if (mapping_shrinkable(mapping))
mm/filemap.c
339
inode_lru_list_add(mapping->host);
mm/filemap.c
340
spin_unlock(&mapping->host->i_lock);
mm/filemap.c
343
filemap_free_folio(mapping, fbatch->folios[i]);
mm/filemap.c
346
int filemap_check_errors(struct address_space *mapping)
mm/filemap.c
350
if (test_bit(AS_ENOSPC, &mapping->flags) &&
mm/filemap.c
351
test_and_clear_bit(AS_ENOSPC, &mapping->flags))
mm/filemap.c
3517
struct address_space *mapping = file->f_mapping;
mm/filemap.c
3518
struct inode *inode = mapping->host;
mm/filemap.c
3528
trace_mm_filemap_fault(mapping, index);
mm/filemap.c
353
if (test_bit(AS_EIO, &mapping->flags) &&
mm/filemap.c
3533
folio = filemap_get_folio(mapping, index);
mm/filemap.c
354
test_and_clear_bit(AS_EIO, &mapping->flags))
mm/filemap.c
3542
filemap_invalidate_lock_shared(mapping);
mm/filemap.c
3561
filemap_invalidate_lock_shared(mapping);
mm/filemap.c
3564
folio = __filemap_get_folio(mapping, index,
mm/filemap.c
3570
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
3579
if (unlikely(folio->mapping != mapping)) {
mm/filemap.c
360
static int filemap_check_and_keep_errors(struct address_space *mapping)
mm/filemap.c
3622
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
363
if (test_bit(AS_EIO, &mapping->flags))
mm/filemap.c
3646
error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
mm/filemap.c
365
if (test_bit(AS_ENOSPC, &mapping->flags))
mm/filemap.c
3653
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
3666
filemap_invalidate_unlock_shared(mapping);
mm/filemap.c
370
static int filemap_writeback(struct address_space *mapping, loff_t start,
mm/filemap.c
3702
struct address_space *mapping, pgoff_t end_pgoff)
mm/filemap.c
3725
if (folio->mapping != mapping)
mm/filemap.c
3729
max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/filemap.c
3752
struct address_space *mapping = folio->mapping;
mm/filemap.c
3769
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
mm/filemap.c
382
if (!mapping_can_writeback(mapping) ||
mm/filemap.c
383
!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
mm/filemap.c
386
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
mm/filemap.c
387
ret = do_writepages(mapping, &wbc);
mm/filemap.c
3876
struct address_space *mapping = file->f_mapping;
mm/filemap.c
3879
XA_STATE(xas, &mapping->i_pages, start_pgoff);
mm/filemap.c
3891
file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
mm/filemap.c
3895
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
mm/filemap.c
3906
if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
mm/filemap.c
3939
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
mm/filemap.c
3942
trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
mm/filemap.c
3958
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
mm/filemap.c
3962
sb_start_pagefault(mapping->host->i_sb);
mm/filemap.c
3965
if (folio->mapping != mapping) {
mm/filemap.c
3978
sb_end_pagefault(mapping->host->i_sb);
mm/filemap.c
3992
struct address_space *mapping = file->f_mapping;
mm/filemap.c
3994
if (!mapping->a_ops->read_folio)
mm/filemap.c
4004
struct address_space *mapping = file->f_mapping;
mm/filemap.c
4006
if (!mapping->a_ops->read_folio)
mm/filemap.c
4058
static struct folio *do_read_cache_folio(struct address_space *mapping,
mm/filemap.c
4065
filler = mapping->a_ops->read_folio;
mm/filemap.c
4067
folio = filemap_get_folio(mapping, index);
mm/filemap.c
4069
folio = filemap_alloc_folio(gfp, mapping_min_folio_order(mapping), NULL);
mm/filemap.c
4072
index = mapping_align_index(mapping, index);
mm/filemap.c
4073
err = filemap_add_folio(mapping, folio, index, gfp);
mm/filemap.c
409
int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
mm/filemap.c
4093
if (!folio->mapping) {
mm/filemap.c
412
return filemap_writeback(mapping, start, end, WB_SYNC_ALL, NULL);
mm/filemap.c
4135
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
mm/filemap.c
4138
return do_read_cache_folio(mapping, index, filler, file,
mm/filemap.c
4139
mapping_gfp_mask(mapping));
mm/filemap.c
416
int filemap_fdatawrite(struct address_space *mapping)
mm/filemap.c
4160
struct folio *mapping_read_folio_gfp(struct address_space *mapping,
mm/filemap.c
4163
return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
mm/filemap.c
4167
static struct page *do_read_cache_page(struct address_space *mapping,
mm/filemap.c
4172
folio = do_read_cache_folio(mapping, index, filler, file, gfp);
mm/filemap.c
4178
struct page *read_cache_page(struct address_space *mapping,
mm/filemap.c
418
return filemap_fdatawrite_range(mapping, 0, LLONG_MAX);
mm/filemap.c
4181
return do_read_cache_page(mapping, index, filler, file,
mm/filemap.c
4182
mapping_gfp_mask(mapping));
mm/filemap.c
4201
struct page *read_cache_page_gfp(struct address_space *mapping,
mm/filemap.c
4205
return do_read_cache_page(mapping, index, NULL, NULL, gfp);
mm/filemap.c
4231
struct address_space *mapping = iocb->ki_filp->f_mapping;
mm/filemap.c
4233
if (mapping->nrpages &&
mm/filemap.c
4234
invalidate_inode_pages2_range(mapping,
mm/filemap.c
4243
struct address_space *mapping = iocb->ki_filp->f_mapping;
mm/filemap.c
4258
written = mapping->a_ops->direct_IO(iocb, from);
mm/filemap.c
4278
struct inode *inode = mapping->host;
mm/filemap.c
4300
struct address_space *mapping = file->f_mapping;
mm/filemap.c
4301
const struct address_space_operations *a_ops = mapping->a_ops;
mm/filemap.c
4302
size_t chunk = mapping_max_folio_size(mapping);
mm/filemap.c
4317
balance_dirty_pages_ratelimited(mapping);
mm/filemap.c
4324
status = a_ops->write_begin(iocb, mapping, pos, bytes,
mm/filemap.c
433
int filemap_flush_range(struct address_space *mapping, loff_t start,
mm/filemap.c
4333
if (mapping_writably_mapped(mapping))
mm/filemap.c
4345
status = a_ops->write_end(iocb, mapping, pos, bytes, copied,
mm/filemap.c
436
return filemap_writeback(mapping, start, end, WB_SYNC_NONE, NULL);
mm/filemap.c
4414
struct address_space *mapping = file->f_mapping;
mm/filemap.c
4415
struct inode *inode = mapping->host;
mm/filemap.c
449
int filemap_flush(struct address_space *mapping)
mm/filemap.c
4495
struct address_space * const mapping = folio->mapping;
mm/filemap.c
4503
if (mapping && mapping->a_ops->release_folio)
mm/filemap.c
4504
return mapping->a_ops->release_folio(folio, gfp);
mm/filemap.c
451
return filemap_flush_range(mapping, 0, LLONG_MAX);
mm/filemap.c
4525
struct address_space *mapping = inode->i_mapping;
mm/filemap.c
4530
if (!mapping || !mapping->nrpages || end < start)
mm/filemap.c
4534
filemap_invalidate_lock(mapping);
mm/filemap.c
4536
if (!mapping->nrpages)
mm/filemap.c
4539
unmap_mapping_pages(mapping, first, nr, false);
mm/filemap.c
4543
filemap_fdatawrite_range(mapping, start, end);
mm/filemap.c
4546
invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
mm/filemap.c
4549
filemap_invalidate_unlock(mapping);
mm/filemap.c
4551
return filemap_check_errors(mapping);
mm/filemap.c
4568
static void filemap_cachestat(struct address_space *mapping,
mm/filemap.c
4571
XA_STATE(xas, &mapping->i_pages, first_index);
mm/filemap.c
460
int filemap_flush_nr(struct address_space *mapping, long *nr_to_write)
mm/filemap.c
4617
if (shmem_mapping(mapping)) {
mm/filemap.c
462
return filemap_writeback(mapping, 0, LLONG_MAX, WB_SYNC_NONE,
mm/filemap.c
4717
struct address_space *mapping;
mm/filemap.c
4743
mapping = fd_file(f)->f_mapping;
mm/filemap.c
4744
filemap_cachestat(mapping, first_index, last_index, &cs);
mm/filemap.c
479
bool filemap_range_has_page(struct address_space *mapping,
mm/filemap.c
483
XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
mm/filemap.c
510
static void __filemap_fdatawait_range(struct address_space *mapping,
mm/filemap.c
523
nr_folios = filemap_get_folios_tag(mapping, &index, end,
mm/filemap.c
555
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
mm/filemap.c
558
__filemap_fdatawait_range(mapping, start_byte, end_byte);
mm/filemap.c
559
return filemap_check_errors(mapping);
mm/filemap.c
577
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
mm/filemap.c
580
__filemap_fdatawait_range(mapping, start_byte, end_byte);
mm/filemap.c
581
return filemap_check_and_keep_errors(mapping);
mm/filemap.c
603
struct address_space *mapping = file->f_mapping;
mm/filemap.c
605
__filemap_fdatawait_range(mapping, start_byte, end_byte);
mm/filemap.c
624
int filemap_fdatawait_keep_errors(struct address_space *mapping)
mm/filemap.c
626
__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
mm/filemap.c
627
return filemap_check_and_keep_errors(mapping);
mm/filemap.c
632
static bool mapping_needs_writeback(struct address_space *mapping)
mm/filemap.c
634
return mapping->nrpages;
mm/filemap.c
637
bool filemap_range_has_writeback(struct address_space *mapping,
mm/filemap.c
640
XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
mm/filemap.c
675
int filemap_write_and_wait_range(struct address_space *mapping,
mm/filemap.c
683
if (mapping_needs_writeback(mapping)) {
mm/filemap.c
684
err = filemap_fdatawrite_range(mapping, lstart, lend);
mm/filemap.c
692
__filemap_fdatawait_range(mapping, lstart, lend);
mm/filemap.c
694
err2 = filemap_check_errors(mapping);
mm/filemap.c
701
void __filemap_set_wb_err(struct address_space *mapping, int err)
mm/filemap.c
703
errseq_t eseq = errseq_set(&mapping->wb_err, err);
mm/filemap.c
705
trace_filemap_set_wb_err(mapping, eseq);
mm/filemap.c
737
struct address_space *mapping = file->f_mapping;
mm/filemap.c
740
if (errseq_check(&mapping->wb_err, old)) {
mm/filemap.c
744
err = errseq_check_and_advance(&mapping->wb_err,
mm/filemap.c
755
clear_bit(AS_EIO, &mapping->flags);
mm/filemap.c
756
clear_bit(AS_ENOSPC, &mapping->flags);
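
The errseq_check()/errseq_check_and_advance() hits above are the body of wb_err reporting: writeback errors are latched once into mapping->wb_err via __filemap_set_wb_err(), and each open file consumes them through its own f_wb_err cursor. A sketch of a caller-side flush that samples errors per-file, loosely mirroring file_write_and_wait_range():

#include <linux/fs.h>
#include <linux/pagemap.h>

static int flush_and_report(struct file *file, loff_t start, loff_t end)
{
        struct address_space *mapping = file->f_mapping;
        int err, err2;

        err = filemap_fdatawrite_range(mapping, start, end);
        /* Wait even on error so in-flight I/O latches into wb_err. */
        filemap_fdatawait_range_keep_errors(mapping, start, end);
        /* Consume errors recorded since this file's last check. */
        err2 = file_check_and_advance_wb_err(file);
        return err ? err : err2;
}
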
mm/filemap.c
780
struct address_space *mapping = file->f_mapping;
mm/filemap.c
785
if (mapping_needs_writeback(mapping)) {
mm/filemap.c
786
err = filemap_fdatawrite_range(mapping, lstart, lend);
mm/filemap.c
789
__filemap_fdatawait_range(mapping, lstart, lend);
mm/filemap.c
813
struct address_space *mapping = old->mapping;
mm/filemap.c
814
void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
mm/filemap.c
816
XA_STATE(xas, &mapping->i_pages, offset);
mm/filemap.c
820
VM_BUG_ON_FOLIO(new->mapping, new);
mm/filemap.c
823
new->mapping = mapping;
mm/filemap.c
831
old->mapping = NULL;
mm/filemap.c
848
noinline int __filemap_add_folio(struct address_space *mapping,
mm/filemap.c
851
XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
mm/filemap.c
858
VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
mm/filemap.c
860
mapping_set_update(&xas, mapping);
mm/filemap.c
868
folio->mapping = mapping;
mm/filemap.c
896
BUG_ON(shmem_mapping(mapping));
mm/filemap.c
919
mapping->nrpages += nr;
mm/filemap.c
942
folio->mapping = NULL;
mm/filemap.c
949
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
mm/filemap.c
955
bool kernel_file = test_bit(AS_KERNEL_FILE, &mapping->flags);
mm/filemap.c
966
ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
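
__filemap_fdatawait_range() in the hits above drives filemap_get_folios_tag() in a folio_batch loop, and the same batching pattern fits any tagged page-cache scan. A sketch that waits on every writeback-tagged folio in a range:

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void wait_on_writeback_range(struct address_space *mapping,
                                    pgoff_t index, pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned int i, nr;

        folio_batch_init(&fbatch);
        while ((nr = filemap_get_folios_tag(mapping, &index, end,
                                            PAGECACHE_TAG_WRITEBACK,
                                            &fbatch))) {
                for (i = 0; i < nr; i++)
                        folio_wait_writeback(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }
}
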
mm/folio-compat.c
69
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
mm/folio-compat.c
72
return filemap_add_folio(mapping, page_folio(page), index, gfp);
mm/folio-compat.c
77
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
mm/folio-compat.c
82
folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
mm/gup.c
2741
struct address_space *mapping;
mm/gup.c
2784
mapping = READ_ONCE(folio->mapping);
mm/gup.c
2791
if (!mapping)
mm/gup.c
2795
mapping_flags = (unsigned long)mapping & FOLIO_MAPPING_FLAGS;
mm/gup.c
2803
if (check_secretmem && secretmem_mapping(mapping))
mm/gup.c
2806
return !reject_file_backed || shmem_mapping(mapping);
mm/huge_memory.c
3549
new_folio->mapping = folio->mapping;
mm/huge_memory.c
3630
struct address_space *mapping, enum split_type split_type)
mm/huge_memory.c
3651
if (mapping) {
mm/huge_memory.c
3719
if (!folio->mapping && !folio_test_anon(folio))
mm/huge_memory.c
3728
!mapping_large_folio_support(folio->mapping)) {
mm/huge_memory.c
3781
struct address_space *mapping, bool do_lru,
mm/huge_memory.c
3791
VM_WARN_ON_ONCE(!mapping && end);
mm/huge_memory.c
3816
if (mapping) {
mm/huge_memory.c
3827
filemap_nr_thps_dec(mapping);
mm/huge_memory.c
3833
if (mapping) {
mm/huge_memory.c
3834
VM_WARN_ON_ONCE_FOLIO(mapping, folio);
mm/huge_memory.c
3846
mapping, split_type);
mm/huge_memory.c
3880
if (!mapping)
mm/huge_memory.c
3885
__xa_store(&mapping->i_pages, new_folio->index,
mm/huge_memory.c
3892
if (shmem_mapping(mapping) && nr_shmem_dropped)
mm/huge_memory.c
3896
new_folio, inode_to_wb(mapping->host));
mm/huge_memory.c
3948
XA_STATE(xas, &folio->mapping->i_pages, folio->index);
mm/huge_memory.c
3951
struct address_space *mapping = NULL;
mm/huge_memory.c
3994
mapping = NULL;
mm/huge_memory.c
3999
mapping = folio->mapping;
mm/huge_memory.c
4000
min_order = mapping_min_folio_order(folio->mapping);
mm/huge_memory.c
4006
gfp = current_gfp_context(mapping_gfp_mask(mapping) &
mm/huge_memory.c
4024
i_mmap_lock_read(mapping);
mm/huge_memory.c
4033
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/huge_memory.c
4034
if (shmem_mapping(mapping))
mm/huge_memory.c
4035
end = shmem_fallocend(mapping->host, end);
mm/huge_memory.c
4051
if (mapping) {
mm/huge_memory.c
4064
ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
mm/huge_memory.c
4067
if (mapping)
mm/huge_memory.c
4073
shmem_uncharge(mapping->host, nr_shmem_dropped);
mm/huge_memory.c
4105
if (mapping)
mm/huge_memory.c
4106
i_mmap_unlock_read(mapping);
mm/huge_memory.c
4265
if (!folio->mapping)
mm/huge_memory.c
4268
return mapping_min_folio_order(folio->mapping);
mm/huge_memory.c
4623
struct address_space *mapping;
mm/huge_memory.c
4643
mapping = folio->mapping;
mm/huge_memory.c
4645
mapping_min_folio_order(mapping));
mm/huge_memory.c
4666
if (!folio_test_anon(folio) && folio->mapping != mapping)
mm/huge_memory.c
4705
struct address_space *mapping;
mm/huge_memory.c
4720
mapping = candidate->f_mapping;
mm/huge_memory.c
4721
min_order = mapping_min_folio_order(mapping);
mm/huge_memory.c
4725
struct folio *folio = filemap_get_folio(mapping, index);
mm/huge_memory.c
4743
if (folio->mapping != mapping)
mm/hugetlb.c
1175
struct address_space *mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
1176
struct inode *inode = mapping->host;
mm/hugetlb.c
1591
struct folio, mapping);
mm/hugetlb.c
1593
folio->mapping = NULL;
mm/hugetlb.c
1630
if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
mm/hugetlb.c
1755
folio->mapping = NULL;
mm/hugetlb.c
1833
struct address_space *mapping = folio_mapping(folio);
mm/hugetlb.c
1835
if (!mapping)
mm/hugetlb.c
1836
return mapping;
mm/hugetlb.c
1838
if (i_mmap_trylock_write(mapping))
mm/hugetlb.c
1839
return mapping;
mm/hugetlb.c
4012
new_folio->mapping = NULL;
mm/hugetlb.c
5113
struct address_space *mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
5136
i_mmap_lock_write(mapping);
mm/hugetlb.c
5165
i_mmap_unlock_write(mapping);
mm/hugetlb.c
5398
struct address_space *mapping;
mm/hugetlb.c
5408
mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
5415
i_mmap_lock_write(mapping);
mm/hugetlb.c
5416
vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
mm/hugetlb.c
5441
i_mmap_unlock_write(mapping);
mm/hugetlb.c
5543
struct address_space *mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
5558
hash = hugetlb_fault_mutex_hash(mapping, idx);
mm/hugetlb.c
5648
struct address_space *mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
5652
folio = filemap_get_folio(mapping, idx);
mm/hugetlb.c
5659
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
mm/hugetlb.c
5662
struct inode *inode = mapping->host;
mm/hugetlb.c
5668
err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
mm/hugetlb.c
5689
struct address_space *mapping,
mm/hugetlb.c
5700
hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
mm/hugetlb.c
5722
static vm_fault_t hugetlb_no_page(struct address_space *mapping,
mm/hugetlb.c
5725
u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
mm/hugetlb.c
5753
folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
mm/hugetlb.c
5755
size = i_size_read(mapping->host) >> huge_page_shift(h);
mm/hugetlb.c
5782
return hugetlb_handle_userfault(vmf, mapping,
mm/hugetlb.c
5817
int err = hugetlb_add_to_page_cache(folio, mapping,
mm/hugetlb.c
5858
return hugetlb_handle_userfault(vmf, mapping,
mm/hugetlb.c
5949
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
mm/hugetlb.c
5954
key[0] = (unsigned long) mapping;
mm/hugetlb.c
5966
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
mm/hugetlb.c
5979
struct address_space *mapping;
mm/hugetlb.c
6001
mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
6002
hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
mm/hugetlb.c
6024
return hugetlb_no_page(mapping, &vmf);
mm/hugetlb.c
6040
return hugetlb_no_page(mapping, &vmf);
mm/hugetlb.c
6205
struct address_space *mapping = dst_vma->vm_file->f_mapping;
mm/hugetlb.c
6237
folio = filemap_lock_hugetlb_folio(h, mapping, idx);
mm/hugetlb.c
6333
if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
mm/hugetlb.c
6342
ret = hugetlb_add_to_page_cache(folio, mapping, idx);
mm/hugetlb.c
6881
struct address_space *mapping = vma->vm_file->f_mapping;
mm/hugetlb.c
6889
i_mmap_lock_read(mapping);
mm/hugetlb.c
6890
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
mm/hugetlb.c
6919
i_mmap_unlock_read(mapping);
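
hugetlb_fault_mutex_hash() above keys a mutex table on (mapping, index) so faults, page-cache insertion, and userfaultfd paths for the same huge page serialize. A sketch of the lock bracket seen around filemap_lock_hugetlb_folio() in the hits, assuming the exported hugetlb_fault_mutex_table:

#include <linux/hugetlb.h>

static void with_hugetlb_fault_mutex(struct address_space *mapping,
                                     pgoff_t idx)
{
        u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        /* ... look up or instantiate the folio for (mapping, idx) ... */
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
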
mm/hwpoison-inject.c
23
struct address_space *mapping;
mm/hwpoison-inject.c
30
mapping = folio_mapping(folio);
mm/hwpoison-inject.c
31
if (mapping == NULL || mapping->host == NULL)
mm/hwpoison-inject.c
34
dev = mapping->host->i_sb->s_dev;
mm/internal.h
150
unsigned long mapping = (unsigned long)folio->mapping;
mm/internal.h
152
return (void *)(mapping & ~FOLIO_MAPPING_FLAGS);
mm/internal.h
1666
struct address_space *mapping,
mm/internal.h
1719
#define mapping_set_update(xas, mapping) do { \
mm/internal.h
1720
if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
mm/internal.h
527
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
mm/internal.h
532
static inline void force_page_cache_readahead(struct address_space *mapping,
mm/internal.h
535
DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
mm/internal.h
539
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
mm/internal.h
541
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
mm/internal.h
543
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
mm/internal.h
544
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
mm/internal.h
547
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
mm/internal.h
548
unsigned long mapping_try_invalidate(struct address_space *mapping,
mm/internal.h
598
struct address_space *mapping = folio_mapping(folio);
mm/internal.h
601
(mapping && mapping_release_always(mapping));
mm/internal.h
885
p->mapping = TAIL_MAPPING;
mm/khugepaged.c
1741
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
mm/khugepaged.c
1745
i_mmap_lock_read(mapping);
mm/khugepaged.c
1746
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
mm/khugepaged.c
1821
i_mmap_unlock_read(mapping);
mm/khugepaged.c
1852
struct address_space *mapping = file->f_mapping;
mm/khugepaged.c
1857
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
mm/khugepaged.c
1869
mapping_set_update(&xas, mapping);
mm/khugepaged.c
1875
new_folio->mapping = mapping;
mm/khugepaged.c
1919
if (shmem_get_folio(mapping->host, index, 0,
mm/khugepaged.c
1936
page_cache_sync_readahead(mapping, &file->f_ra,
mm/khugepaged.c
1941
folio = filemap_lock_folio(mapping, index);
mm/khugepaged.c
1961
filemap_flush(mapping);
mm/khugepaged.c
2001
if (folio_mapping(folio) != mapping) {
mm/khugepaged.c
2067
filemap_nr_thps_inc(mapping);
mm/khugepaged.c
2075
if (inode_is_open_for_write(mapping->host)) {
mm/khugepaged.c
2077
filemap_nr_thps_dec(mapping);
mm/khugepaged.c
2093
!shmem_charge(mapping->host, nr_none))
mm/khugepaged.c
2133
i_mmap_lock_read(mapping);
mm/khugepaged.c
2166
vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
mm/khugepaged.c
2174
i_mmap_unlock_read(mapping);
mm/khugepaged.c
2219
retract_page_tables(mapping, start);
mm/khugepaged.c
2234
folio->mapping = NULL;
mm/khugepaged.c
2247
mapping->nrpages -= nr_none;
mm/khugepaged.c
2249
shmem_uncharge(mapping->host, nr_none);
mm/khugepaged.c
2264
filemap_nr_thps_dec(mapping);
mm/khugepaged.c
2272
new_folio->mapping = NULL;
mm/khugepaged.c
2286
struct address_space *mapping = file->f_mapping;
mm/khugepaged.c
2287
XA_STATE(xas, &mapping->i_pages, start);
mm/ksm.c
1000
if (READ_ONCE(folio->mapping) != expected_mapping) {
mm/ksm.c
1096
folio->mapping = (void *)((unsigned long)stable_node | FOLIO_MAPPING_KSM);
mm/ksm.c
3273
VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
mm/ksm.c
958
if (READ_ONCE(folio->mapping) != expected_mapping)
mm/ksm.c
986
if (READ_ONCE(folio->mapping) != expected_mapping) {
mm/madvise.c
234
struct address_space *mapping)
mm/madvise.c
236
XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
mm/madvise.c
258
folio = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
mm/mapping_dirty_helpers.c
263
unsigned long wp_shared_mapping_range(struct address_space *mapping,
mm/mapping_dirty_helpers.c
268
i_mmap_lock_read(mapping);
mm/mapping_dirty_helpers.c
269
WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
mm/mapping_dirty_helpers.c
271
i_mmap_unlock_read(mapping);
mm/mapping_dirty_helpers.c
313
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
mm/mapping_dirty_helpers.c
329
i_mmap_lock_read(mapping);
mm/mapping_dirty_helpers.c
330
WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
mm/mapping_dirty_helpers.c
332
i_mmap_unlock_read(mapping);
mm/memfd.c
155
static int memfd_wait_for_pins(struct address_space *mapping)
mm/memfd.c
157
XA_STATE(xas, &mapping->i_pages, 0);
mm/memfd_luo.c
396
struct address_space *mapping = inode->i_mapping;
mm/memfd_luo.c
424
err = mem_cgroup_charge(folio, NULL, mapping_gfp_mask(mapping));
mm/memfd_luo.c
431
err = shmem_add_to_page_cache(folio, mapping, index, NULL,
mm/memfd_luo.c
432
mapping_gfp_mask(mapping));
mm/memory-failure.c
1024
struct address_space *mapping;
mm/memory-failure.c
1045
mapping = folio_mapping(folio);
mm/memory-failure.c
1046
if (!mapping) {
mm/memory-failure.c
1056
extra_pins = shmem_mapping(mapping);
mm/memory-failure.c
1063
ret = truncate_error_folio(folio, page_to_pfn(p), mapping);
mm/memory-failure.c
1081
struct address_space *mapping = folio_mapping(folio);
mm/memory-failure.c
1084
if (mapping) {
mm/memory-failure.c
1091
mapping_set_error(mapping, -EIO);
mm/memory-failure.c
1164
struct address_space *mapping;
mm/memory-failure.c
1167
mapping = folio_mapping(folio);
mm/memory-failure.c
1168
if (mapping) {
mm/memory-failure.c
1169
res = truncate_error_folio(folio, page_to_pfn(p), mapping);
mm/memory-failure.c
1527
struct address_space *mapping;
mm/memory-failure.c
1540
mapping = folio_mapping(folio);
mm/memory-failure.c
1541
if (!must_kill && !folio_test_dirty(folio) && mapping &&
mm/memory-failure.c
1542
mapping_can_writeback(mapping)) {
mm/memory-failure.c
1560
mapping = hugetlb_folio_mapping_lock_write(folio);
mm/memory-failure.c
1561
if (!mapping) {
mm/memory-failure.c
1568
i_mmap_unlock_write(mapping);
mm/memory-failure.c
1685
struct address_space *mapping, pgoff_t index, int flags)
mm/memory-failure.c
1703
unmap_mapping_range(mapping, start, size, 0);
mm/memory-failure.c
1770
unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
mm/memory-failure.c
1784
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
mm/memory-failure.c
1797
cookie = dax_lock_mapping_entry(mapping, index, &page);
mm/memory-failure.c
1810
collect_procs_fsdax(page, mapping, index, &to_kill, pre_remove);
mm/memory-failure.c
1811
unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
mm/memory-failure.c
1814
dax_unlock_mapping_entry(mapping, index, cookie);
mm/memory-failure.c
2244
struct address_space *mapping = pfn_space->mapping;
mm/memory-failure.c
2246
i_mmap_lock_read(mapping);
mm/memory-failure.c
2254
vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
mm/memory-failure.c
2263
i_mmap_unlock_read(mapping);
mm/memory-failure.c
2545
folio->mapping == NULL) {
mm/memory-failure.c
589
struct address_space *mapping = folio->mapping;
mm/memory-failure.c
592
i_mmap_lock_read(mapping);
mm/memory-failure.c
601
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
mm/memory-failure.c
617
i_mmap_unlock_read(mapping);
mm/memory-failure.c
633
struct address_space *mapping, pgoff_t pgoff,
mm/memory-failure.c
639
i_mmap_lock_read(mapping);
mm/memory-failure.c
653
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
mm/memory-failure.c
659
i_mmap_unlock_read(mapping);
mm/memory-failure.c
669
if (!folio->mapping)
mm/memory-failure.c
936
struct address_space *mapping)
mm/memory-failure.c
940
if (mapping->a_ops->error_remove_folio) {
mm/memory-failure.c
941
int err = mapping->a_ops->error_remove_folio(mapping, folio);
mm/memory-failure.c
954
if (mapping_evict_folio(mapping, folio))
mm/memory.c
3596
if (!folio->mapping) {
mm/memory.c
3614
struct address_space *mapping;
mm/memory.c
3627
mapping = folio_raw_mapping(folio);
mm/memory.c
3642
if ((dirtied || page_mkwrite) && mapping) {
mm/memory.c
3646
balance_dirty_pages_ratelimited(mapping);
mm/memory.c
4285
struct address_space *mapping = folio->mapping;
mm/memory.c
4299
i_mmap_lock_read(mapping);
mm/memory.c
4300
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
mm/memory.c
4301
unmap_mapping_range_tree(&mapping->i_mmap, first_index,
mm/memory.c
4303
i_mmap_unlock_read(mapping);
mm/memory.c
4318
void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
mm/memory.c
4329
i_mmap_lock_read(mapping);
mm/memory.c
4330
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
mm/memory.c
4331
unmap_mapping_range_tree(&mapping->i_mmap, first_index,
mm/memory.c
4333
i_mmap_unlock_read(mapping);
mm/memory.c
4354
void unmap_mapping_range(struct address_space *mapping,
mm/memory.c
4368
unmap_mapping_pages(mapping, hba, hlen, even_cows);
mm/memory.c
5376
if (mapping_evict_folio(folio->mapping, folio))
mm/memory.c
5589
struct address_space *mapping = vma->vm_file->f_mapping;
mm/memory.c
5592
file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
mm/memory.c
5601
needs_fallback = !shmem_mapping(mapping) &&
mm/memory.c
603
struct address_space *mapping;
mm/memory.c
609
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
mm/memory.c
618
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
mm/memory.c
624
mapping ? mapping->a_ops->read_folio : NULL);
mm/memory.c
6746
struct address_space *mapping = file ? file->f_mapping : NULL;
mm/memory.c
6748
if (mapping)
mm/memory.c
6749
lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
mm/memremap.c
449
folio->mapping = NULL;
mm/memremap.c
510
new_folio->mapping = NULL;
mm/migrate.c
1020
int buffer_migrate_folio(struct address_space *mapping,
mm/migrate.c
1023
return __buffer_migrate_folio(mapping, dst, src, mode, false);
mm/migrate.c
1041
int buffer_migrate_folio_norefs(struct address_space *mapping,
mm/migrate.c
1044
return __buffer_migrate_folio(mapping, dst, src, mode, true);
mm/migrate.c
1049
int filemap_migrate_folio(struct address_space *mapping,
mm/migrate.c
1052
return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
mm/migrate.c
1059
static int fallback_migrate_folio(struct address_space *mapping,
mm/migrate.c
1062
WARN_ONCE(mapping->a_ops->writepages,
mm/migrate.c
1064
mapping->a_ops);
mm/migrate.c
1075
return migrate_folio(mapping, dst, src, mode);
mm/migrate.c
1093
struct address_space *mapping = folio_mapping(src);
mm/migrate.c
1099
if (!mapping)
mm/migrate.c
1100
rc = migrate_folio(mapping, dst, src, mode);
mm/migrate.c
1101
else if (mapping_inaccessible(mapping))
mm/migrate.c
1103
else if (mapping->a_ops->migrate_folio)
mm/migrate.c
1111
rc = mapping->a_ops->migrate_folio(mapping, dst, src,
mm/migrate.c
1114
rc = fallback_migrate_folio(mapping, dst, src, mode);
mm/migrate.c
1122
src->mapping = NULL;
mm/migrate.c
1319
if (!src->mapping) {
mm/migrate.c
1461
struct address_space *mapping = NULL;
mm/migrate.c
1510
mapping = hugetlb_folio_mapping_lock_write(src);
mm/migrate.c
1511
if (unlikely(!mapping))
mm/migrate.c
1528
i_mmap_unlock_write(mapping);
mm/migrate.c
571
static int __folio_migrate_mapping(struct address_space *mapping,
mm/migrate.c
574
XA_STATE(xas, &mapping->i_pages, folio->index);
mm/migrate.c
580
if (!mapping) {
mm/migrate.c
592
newfolio->mapping = folio->mapping;
mm/migrate.c
625
newfolio->mapping = folio->mapping;
mm/migrate.c
696
if (dirty && mapping_can_writeback(mapping)) {
mm/migrate.c
708
int folio_migrate_mapping(struct address_space *mapping,
mm/migrate.c
716
return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
mm/migrate.c
724
int migrate_huge_page_move_mapping(struct address_space *mapping,
mm/migrate.c
727
XA_STATE(xas, &mapping->i_pages, src->index);
mm/migrate.c
744
dst->mapping = src->mapping;
mm/migrate.c
855
static int __migrate_folio(struct address_space *mapping, struct folio *dst,
mm/migrate.c
869
rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
mm/migrate.c
892
int migrate_folio(struct address_space *mapping, struct folio *dst,
mm/migrate.c
896
return __migrate_folio(mapping, dst, src, NULL, mode);
mm/migrate.c
934
static int __buffer_migrate_folio(struct address_space *mapping,
mm/migrate.c
944
return migrate_folio(mapping, dst, src, mode);
mm/migrate.c
962
spin_lock(&mapping->i_private_lock);
mm/migrate.c
971
spin_unlock(&mapping->i_private_lock);
mm/migrate.c
983
rc = filemap_migrate_folio(mapping, dst, src, mode);
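
The migrate.c hits show the dispatch: mapping->a_ops->migrate_folio when set, else fallback_migrate_folio(). For most filesystems the right hook is one of the stock helpers; a sketch of wiring filemap_migrate_folio() into address_space_operations, with the other methods elided:

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations my_fs_aops = {
        /* Moves folio contents and folio-private data; suitable when
         * the private data needs no fixups during migration. */
        .migrate_folio  = filemap_migrate_folio,
        /* ... read_folio, writepages, dirty_folio, etc. elided ... */
};
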
mm/migrate_device.c
1113
struct address_space *mapping;
mm/migrate_device.c
1165
mapping = folio_mapping(folio);
mm/migrate_device.c
1198
if (mapping) {
mm/migrate_device.c
1227
r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
mm/migrate_device.c
377
if (!page || !page->mapping) {
mm/mincore.c
108
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
mm/mincore.c
119
folio = filemap_get_entry(mapping, index);
mm/mincore.c
122
if (shmem_mapping(mapping))
mm/mmap.c
1826
struct address_space *mapping = file->f_mapping;
mm/mmap.c
1829
i_mmap_lock_write(mapping);
mm/mmap.c
1831
mapping_allow_writable(mapping);
mm/mmap.c
1832
flush_dcache_mmap_lock(mapping);
mm/mmap.c
1835
&mapping->i_mmap);
mm/mmap.c
1836
flush_dcache_mmap_unlock(mapping);
mm/mmap.c
1837
i_mmap_unlock_write(mapping);
mm/nommu.c
568
struct address_space *mapping = vma->vm_file->f_mapping;
mm/nommu.c
570
i_mmap_lock_write(mapping);
mm/nommu.c
571
flush_dcache_mmap_lock(mapping);
mm/nommu.c
572
vma_interval_tree_insert(vma, &mapping->i_mmap);
mm/nommu.c
573
flush_dcache_mmap_unlock(mapping);
mm/nommu.c
574
i_mmap_unlock_write(mapping);
mm/nommu.c
583
struct address_space *mapping;
mm/nommu.c
584
mapping = vma->vm_file->f_mapping;
mm/nommu.c
586
i_mmap_lock_write(mapping);
mm/nommu.c
587
flush_dcache_mmap_lock(mapping);
mm/nommu.c
588
vma_interval_tree_remove(vma, &mapping->i_mmap);
mm/nommu.c
589
flush_dcache_mmap_unlock(mapping);
mm/nommu.c
590
i_mmap_unlock_write(mapping);
mm/page-writeback.c
2045
int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
mm/page-writeback.c
2048
struct inode *inode = mapping->host;
mm/page-writeback.c
2115
void balance_dirty_pages_ratelimited(struct address_space *mapping)
mm/page-writeback.c
2117
balance_dirty_pages_ratelimited_flags(mapping, 0);
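
generic_perform_write() (the mm/filemap.c:4317 hit) calls balance_dirty_pages_ratelimited() once per copied chunk, so the dirty throttle can pace the writer. Any loop that dirties page cache should do likewise; a sketch with a hypothetical dirty_one_folio():

#include <linux/writeback.h>
#include <linux/pagemap.h>

static void dirty_many(struct address_space *mapping, pgoff_t first,
                       pgoff_t last)
{
        pgoff_t index;

        for (index = first; index <= last; index++) {
                dirty_one_folio(mapping, index);        /* hypothetical */
                /* Let balance_dirty_pages() rate-limit this task. */
                balance_dirty_pages_ratelimited(mapping);
        }
}
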
mm/page-writeback.c
2360
void tag_pages_for_writeback(struct address_space *mapping,
mm/page-writeback.c
2363
XA_STATE(xas, &mapping->i_pages, start);
mm/page-writeback.c
2382
static bool folio_prepare_writeback(struct address_space *mapping,
mm/page-writeback.c
2392
if (unlikely(folio->mapping != mapping))
mm/page-writeback.c
2422
static struct folio *writeback_get_folio(struct address_space *mapping,
mm/page-writeback.c
2432
filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
mm/page-writeback.c
2440
if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
mm/page-writeback.c
2445
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
mm/page-writeback.c
2475
struct folio *writeback_iter(struct address_space *mapping,
mm/page-writeback.c
2490
wbc->index = mapping->writeback_index;
mm/page-writeback.c
2506
tag_pages_for_writeback(mapping, wbc->index,
mm/page-writeback.c
2533
folio = writeback_get_folio(mapping, wbc);
mm/page-writeback.c
2546
mapping->writeback_index = 0;
mm/page-writeback.c
2558
mapping->writeback_index = folio_next_index(folio);
mm/page-writeback.c
2564
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
mm/page-writeback.c
2571
wb = inode_to_wb_wbc(mapping->host, wbc);
mm/page-writeback.c
2574
if (mapping->a_ops->writepages)
mm/page-writeback.c
2575
ret = mapping->a_ops->writepages(mapping, wbc);
mm/page-writeback.c
2605
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
mm/page-writeback.c
2619
struct address_space *mapping)
mm/page-writeback.c
2621
struct inode *inode = mapping->host;
mm/page-writeback.c
2623
trace_writeback_dirty_folio(folio, mapping);
mm/page-writeback.c
2625
if (mapping_can_writeback(mapping)) {
mm/page-writeback.c
2674
void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
mm/page-writeback.c
2683
VM_WARN_ON_ONCE(folio_test_swapcache(folio) || shmem_mapping(mapping));
mm/page-writeback.c
2685
xa_lock_irqsave(&mapping->i_pages, flags);
mm/page-writeback.c
2686
if (folio->mapping) { /* Race with truncate? */
mm/page-writeback.c
2688
folio_account_dirtied(folio, mapping);
mm/page-writeback.c
2689
__xa_set_mark(&mapping->i_pages, folio->index,
mm/page-writeback.c
2692
xa_unlock_irqrestore(&mapping->i_pages, flags);
mm/page-writeback.c
2714
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
mm/page-writeback.c
2719
__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
mm/page-writeback.c
2721
if (mapping->host) {
mm/page-writeback.c
2723
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
mm/page-writeback.c
2744
struct address_space *mapping = folio->mapping;
mm/page-writeback.c
2749
ret = filemap_dirty_folio(mapping, folio);
mm/page-writeback.c
2750
if (mapping && mapping_can_writeback(mapping)) {
mm/page-writeback.c
2751
struct inode *inode = mapping->host;
mm/page-writeback.c
2780
struct address_space *mapping = folio_mapping(folio);
mm/page-writeback.c
2782
if (likely(mapping)) {
mm/page-writeback.c
2796
return mapping->a_ops->dirty_folio(mapping, folio);
mm/page-writeback.c
2799
return noop_dirty_folio(mapping, folio);
mm/page-writeback.c
2839
struct address_space *mapping = folio_mapping(folio);
mm/page-writeback.c
2841
if (mapping_can_writeback(mapping)) {
mm/page-writeback.c
2842
struct inode *inode = mapping->host;
mm/page-writeback.c
2874
struct address_space *mapping = folio_mapping(folio);
mm/page-writeback.c
2879
if (mapping && mapping_can_writeback(mapping)) {
mm/page-writeback.c
2880
struct inode *inode = mapping->host;
mm/page-writeback.c
2959
struct address_space *mapping = folio_mapping(folio);
mm/page-writeback.c
2962
if (mapping && mapping_use_writeback_tags(mapping)) {
mm/page-writeback.c
2963
struct inode *inode = mapping->host;
mm/page-writeback.c
2967
xa_lock_irqsave(&mapping->i_pages, flags);
mm/page-writeback.c
2969
__xa_clear_mark(&mapping->i_pages, folio->index,
mm/page-writeback.c
2975
if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
mm/page-writeback.c
2977
if (mapping->host)
mm/page-writeback.c
2978
sb_clear_inode_writeback(mapping->host);
mm/page-writeback.c
2981
xa_unlock_irqrestore(&mapping->i_pages, flags);
mm/page-writeback.c
2996
struct address_space *mapping = folio_mapping(folio);
mm/page-writeback.c
3002
if (mapping && mapping_use_writeback_tags(mapping)) {
mm/page-writeback.c
3003
XA_STATE(xas, &mapping->i_pages, folio->index);
mm/page-writeback.c
3004
struct inode *inode = mapping->host;
mm/page-writeback.c
3013
on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
mm/page-writeback.c
3025
if (mapping->host)
mm/page-writeback.c
3026
sb_mark_inode_writeback(mapping->host);
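Annotation: most of the mm/page-writeback.c block above is the machinery behind writeback_iter() (line 2475): tagging, batched lookup, and writeback_index resume handling. From a filesystem's side the whole thing collapses to one loop; a hedged sketch, where example_write_folio() is a hypothetical helper that writes back and unlocks one folio:

    #include <linux/fs.h>
    #include <linux/writeback.h>

    static int example_writepages(struct address_space *mapping,
                                  struct writeback_control *wbc)
    {
            struct folio *folio = NULL;
            int error = 0;

            /* writeback_iter() hands back locked dirty folios one at a
             * time and records the resume point in
             * mapping->writeback_index (lines 2546/2558 above). */
            while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                    error = example_write_folio(folio, wbc);

            return error;
    }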
mm/page_alloc.c
1077
if (unlikely((unsigned long)page->mapping |
mm/page_alloc.c
1095
if (unlikely(page->mapping != NULL))
mm/page_alloc.c
1200
if (page->mapping != TAIL_MAPPING) {
mm/page_alloc.c
1216
page->mapping = NULL;
mm/page_alloc.c
1417
folio->mapping = NULL;
mm/page_io.c
473
struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
mm/page_io.c
477
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
mm/page_io.c
662
struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
mm/page_io.c
666
ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
mm/page_io.c
82
struct address_space *mapping = swap_file->f_mapping;
mm/page_io.c
83
struct inode *inode = mapping->host;
mm/pagewalk.c
797
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
mm/pagewalk.c
813
lockdep_assert_held(&mapping->i_mmap_rwsem);
mm/pagewalk.c
814
vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
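Annotation: the lockdep assertion at mm/pagewalk.c:813 makes the calling convention explicit: walk_page_mapping() expects the caller to already hold i_mmap_rwsem. A minimal caller fragment, assuming read-side locking suffices for the walk in question; ops and priv are hypothetical:

    i_mmap_lock_read(mapping);
    ret = walk_page_mapping(mapping, first_index, nr_pages, &ops, priv);
    i_mmap_unlock_read(mapping);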
mm/readahead.c
142
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
mm/readahead.c
144
ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
mm/readahead.c
151
const struct address_space_operations *aops = rac->mapping->a_ops;
mm/readahead.c
214
struct address_space *mapping = ractl->mapping;
mm/readahead.c
216
gfp_t gfp_mask = readahead_gfp_mask(mapping);
mm/readahead.c
218
unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
mm/readahead.c
232
lockdep_assert_held(&mapping->invalidate_lock);
mm/readahead.c
234
trace_page_cache_ra_unbounded(mapping->host, index, nr_to_read,
mm/readahead.c
236
index = mapping_align_index(mapping, index);
mm/readahead.c
259
struct folio *folio = xa_load(&mapping->i_pages, index + i);
mm/readahead.c
278
mapping_min_folio_order(mapping));
mm/readahead.c
282
ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
mm/readahead.c
318
struct address_space *mapping = ractl->mapping;
mm/readahead.c
320
loff_t isize = i_size_read(mapping->host);
mm/readahead.c
333
filemap_invalidate_lock_shared(mapping);
mm/readahead.c
335
filemap_invalidate_unlock_shared(mapping);
mm/readahead.c
345
struct address_space *mapping = ractl->mapping;
mm/readahead.c
347
struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
mm/readahead.c
350
if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
mm/readahead.c
456
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
mm/readahead.c
470
struct address_space *mapping = ractl->mapping;
mm/readahead.c
473
unsigned int min_order = mapping_min_folio_order(mapping);
mm/readahead.c
474
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
mm/readahead.c
478
gfp_t gfp = readahead_gfp_mask(mapping);
mm/readahead.c
481
trace_page_cache_ra_order(mapping->host, start, ra);
mm/readahead.c
482
if (!mapping_large_folio_support(mapping)) {
mm/readahead.c
489
new_order = min(mapping_max_folio_order(mapping), new_order);
mm/readahead.c
497
filemap_invalidate_lock_shared(mapping);
mm/readahead.c
503
ractl->_index = mapping_align_index(mapping, index);
mm/readahead.c
522
filemap_invalidate_unlock_shared(mapping);
mm/readahead.c
545
struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
mm/readahead.c
566
trace_page_cache_sync_ra(ractl->mapping->host, index, ra, req_count);
mm/readahead.c
606
miss = page_cache_prev_miss(ractl->mapping, index - 1, max_pages);
mm/readahead.c
651
trace_page_cache_async_ra(ractl->mapping->host, index, ra, req_count);
mm/readahead.c
681
start = page_cache_next_miss(ractl->mapping, index + 1, max_pages);
mm/readahead.c
769
struct address_space *mapping = ractl->mapping;
mm/readahead.c
772
gfp_t gfp_mask = readahead_gfp_mask(mapping);
mm/readahead.c
773
unsigned long min_nrpages = mapping_min_folio_nrpages(mapping);
mm/readahead.c
774
unsigned int min_order = mapping_min_folio_order(mapping);
mm/readahead.c
786
struct folio *folio = xa_load(&mapping->i_pages, index);
mm/readahead.c
795
index = mapping_align_index(mapping, index);
mm/readahead.c
796
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
mm/readahead.c
815
struct folio *folio = xa_load(&mapping->i_pages, index);
mm/readahead.c
824
index = mapping_align_index(mapping, index);
mm/readahead.c
825
if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
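Annotation: the mm/readahead.c entries show the core's half of the readahead contract: allocate folios, insert them with filemap_add_folio(), then hand the batch to mapping->a_ops->readahead (line 350 checks the method exists). The filesystem's half is a drain loop; a hedged sketch with hypothetical names for everything except readahead_folio():

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static void example_readahead(struct readahead_control *rac)
    {
            struct folio *folio;

            /* Folios arrive locked and already inserted in
             * rac->mapping's page cache; the filesystem starts I/O and
             * unlocks each folio when its read completes. */
            while ((folio = readahead_folio(rac)))
                    example_start_read(rac->mapping, folio); /* hypothetical */
    }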
mm/rmap.c
1196
struct address_space *mapping;
mm/rmap.c
1208
mapping = folio_mapping(folio);
mm/rmap.c
1209
if (!mapping)
mm/rmap.c
1243
static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
mm/rmap.c
1267
int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
mm/rmap.c
1282
if (!mapping)
mm/rmap.c
1285
__rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc,
mm/rmap.c
1447
WRITE_ONCE(folio->mapping, anon_vma);
mm/rmap.c
1478
WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
mm/rmap.c
3023
static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
mm/rmap.c
3030
VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio);
mm/rmap.c
3035
if (i_mmap_trylock_read(mapping))
mm/rmap.c
3043
i_mmap_lock_read(mapping);
mm/rmap.c
3046
vma_interval_tree_foreach(vma, &mapping->i_mmap,
mm/rmap.c
3063
i_mmap_unlock_read(mapping);
mm/rmap.c
3086
if (!folio->mapping)
mm/rmap.c
3089
__rmap_walk_file(folio, folio->mapping, folio->index,
mm/rmap.c
595
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
mm/rmap.c
643
anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
mm/rmap.c
870
} else if (vma->vm_file->f_mapping != folio->mapping) {
mm/secretmem.c
107
filemap_invalidate_unlock_shared(mapping);
mm/secretmem.c
146
static int secretmem_migrate_folio(struct address_space *mapping,
mm/secretmem.c
168
struct address_space *mapping = inode->i_mapping;
mm/secretmem.c
172
filemap_invalidate_lock(mapping);
mm/secretmem.c
179
filemap_invalidate_unlock(mapping);
mm/secretmem.c
52
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
mm/secretmem.c
64
filemap_invalidate_lock_shared(mapping);
mm/secretmem.c
67
folio = filemap_lock_folio(mapping, offset);
mm/secretmem.c
83
err = filemap_add_folio(mapping, folio, offset, gfp);
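Annotation: the mm/secretmem.c fault path above is a compact example of the lookup-or-insert idiom under the invalidate lock. Its skeleton, with error handling and the retry loop elided; gfp and offset are as in the excerpts:

    filemap_invalidate_lock_shared(mapping);
    folio = filemap_lock_folio(mapping, offset);
    if (IS_ERR(folio)) {                     /* not cached yet */
            folio = folio_alloc(gfp, 0);
            err = filemap_add_folio(mapping, folio, offset, gfp);
            /* -EEXIST means a racing fault won; retry the lookup */
    }
    filemap_invalidate_unlock_shared(mapping);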
mm/shmem.c
1000
XA_STATE(xas, &mapping->i_pages, start);
mm/shmem.c
1034
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
1052
return shmem_partial_swap_usage(mapping, vma->vm_pgoff,
mm/shmem.c
1059
void shmem_unlock_mapping(struct address_space *mapping)
mm/shmem.c
1068
while (!mapping_unevictable(mapping) &&
mm/shmem.c
1069
filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
mm/shmem.c
1089
if (folio->mapping == inode->i_mapping)
mm/shmem.c
1111
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
1131
while (index < end && find_lock_entries(mapping, &index, end - 1,
mm/shmem.c
1139
nr_swaps_freed += shmem_free_swap(mapping, indices[i],
mm/shmem.c
1145
truncate_inode_folio(mapping, folio);
mm/shmem.c
1193
if (!find_get_entries(mapping, &index, end - 1, &fbatch,
mm/shmem.c
1211
swaps_freed = shmem_free_swap(mapping, indices[i],
mm/shmem.c
1216
order = shmem_confirm_swap(mapping, indices[i],
mm/shmem.c
1239
if (folio_mapping(folio) != mapping) {
mm/shmem.c
1249
truncate_inode_folio(mapping, folio);
mm/shmem.c
1438
static unsigned int shmem_find_swap_entries(struct address_space *mapping,
mm/shmem.c
1442
XA_STATE(xas, &mapping->i_pages, start);
mm/shmem.c
1486
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
1492
mapping_gfp_mask(mapping), NULL, NULL);
mm/shmem.c
1510
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
1518
if (!shmem_find_swap_entries(mapping, start, &fbatch,
mm/shmem.c
1593
struct address_space *mapping = folio->mapping;
mm/shmem.c
1594
struct inode *inode = mapping->host;
mm/shmem.c
1713
error = shmem_add_to_page_cache(folio, mapping, index,
mm/shmem.c
1878
struct address_space *mapping, pgoff_t index,
mm/shmem.c
1905
if (!xa_find(&mapping->i_pages, &aligned_index,
mm/shmem.c
1915
struct address_space *mapping, pgoff_t index,
mm/shmem.c
1940
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
1953
mapping, index, orders);
mm/shmem.c
1984
if (xa_find(&mapping->i_pages, &index,
mm/shmem.c
1998
error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp);
mm/shmem.c
2017
READ_ONCE(mapping->nrpages);
mm/shmem.c
2191
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
2197
old = xa_cmpxchg_irq(&mapping->i_pages, index,
mm/shmem.c
2218
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
2219
XA_STATE_ORDER(xas, &mapping->i_pages, index, 0);
mm/shmem.c
2267
__xa_store(&mapping->i_pages, aligned_index + i,
mm/shmem.c
2298
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
2317
order = shmem_confirm_swap(mapping, index, index_entry);
mm/shmem.c
2403
shmem_confirm_swap(mapping, index, swap) < 0) {
mm/shmem.c
2425
error = shmem_add_to_page_cache(folio, mapping, index,
mm/shmem.c
2443
if (shmem_confirm_swap(mapping, index, swap) < 0)
mm/shmem.c
2513
if (unlikely(folio->mapping != inode->i_mapping)) {
mm/shmem.c
277
bool shmem_mapping(const struct address_space *mapping)
mm/shmem.c
279
return mapping->a_ops == &shmem_aops;
mm/shmem.c
3193
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
3194
gfp_t gfp = mapping_gfp_mask(mapping);
mm/shmem.c
3276
ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
mm/shmem.c
3304
shmem_write_begin(const struct kiocb *iocb, struct address_space *mapping,
mm/shmem.c
3308
struct inode *inode = mapping->host;
mm/shmem.c
3342
shmem_write_end(const struct kiocb *iocb, struct address_space *mapping,
mm/shmem.c
3346
struct inode *inode = mapping->host;
mm/shmem.c
3370
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
3433
if (mapping_writably_mapped(mapping)) {
mm/shmem.c
3558
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
3628
if (mapping_writably_mapped(mapping)) {
mm/shmem.c
3667
struct address_space *mapping = file->f_mapping;
mm/shmem.c
3668
struct inode *inode = mapping->host;
mm/shmem.c
3678
offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
mm/shmem.c
3706
struct address_space *mapping = file->f_mapping;
mm/shmem.c
3725
unmap_mapping_range(mapping, unmap_start,
mm/shmem.c
472
struct address_space *mapping = inode->i_mapping;
mm/shmem.c
478
xa_lock_irq(&mapping->i_pages);
mm/shmem.c
479
mapping->nrpages += pages;
mm/shmem.c
480
xa_unlock_irq(&mapping->i_pages);
mm/shmem.c
497
static int shmem_replace_entry(struct address_space *mapping,
mm/shmem.c
500
XA_STATE(xas, &mapping->i_pages, index);
mm/shmem.c
520
static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
mm/shmem.c
5207
static int shmem_error_remove_folio(struct address_space *mapping,
mm/shmem.c
523
XA_STATE(xas, &mapping->i_pages, index);
mm/shmem.c
5787
void shmem_unlock_mapping(struct address_space *mapping)
mm/shmem.c
5981
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
mm/shmem.c
5985
struct inode *inode = mapping->host;
mm/shmem.c
6000
return mapping_read_folio_gfp(mapping, index, gfp);
mm/shmem.c
6005
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
mm/shmem.c
6008
struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
mm/shmem.c
882
struct address_space *mapping,
mm/shmem.c
885
XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
mm/shmem.c
895
folio->mapping = mapping;
mm/shmem.c
926
mapping->nrpages += nr;
mm/shmem.c
932
folio->mapping = NULL;
mm/shmem.c
945
struct address_space *mapping = folio->mapping;
mm/shmem.c
949
xa_lock_irq(&mapping->i_pages);
mm/shmem.c
950
error = shmem_replace_entry(mapping, folio->index, folio, radswap);
mm/shmem.c
951
folio->mapping = NULL;
mm/shmem.c
952
mapping->nrpages -= nr;
mm/shmem.c
954
xa_unlock_irq(&mapping->i_pages);
mm/shmem.c
964
static long shmem_free_swap(struct address_space *mapping,
mm/shmem.c
967
XA_STATE(xas, &mapping->i_pages, index);
mm/shmem.c
997
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
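Annotation: shmem_read_folio_gfp() and shmem_read_mapping_page_gfp() (mm/shmem.c:5981 and 6005 above) are the exported entry points drivers use to page in tmpfs-backed objects. A usage fragment; obj->filp and the surrounding driver are hypothetical, while the two helpers and mapping_gfp_mask() appear in the index:

    /* Hedged sketch: read page i of a shmem-backed object. */
    struct address_space *mapping = obj->filp->f_mapping;
    struct page *page;

    page = shmem_read_mapping_page_gfp(mapping, i, mapping_gfp_mask(mapping));
    if (IS_ERR(page))
            return PTR_ERR(page);
    /* ... use the page ... */
    put_page(page);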
mm/slub.c
3550
page->mapping = NULL;
mm/swapfile.c
2536
struct address_space *mapping = swap_file->f_mapping;
mm/swapfile.c
2539
if (mapping->a_ops->swap_deactivate)
mm/swapfile.c
2540
mapping->a_ops->swap_deactivate(swap_file);
mm/swapfile.c
2621
struct address_space *mapping = swap_file->f_mapping;
mm/swapfile.c
2622
struct inode *inode = mapping->host;
mm/swapfile.c
2631
if (mapping->a_ops->swap_activate) {
mm/swapfile.c
2632
ret = mapping->a_ops->swap_activate(sis, swap_file, span);
mm/swapfile.c
2776
struct address_space *mapping;
mm/swapfile.c
2791
mapping = victim->f_mapping;
mm/swapfile.c
2795
if (p->swap_file->f_mapping == mapping) {
mm/swapfile.c
2884
inode = mapping->host;
mm/swapfile.c
3332
struct address_space *mapping;
mm/swapfile.c
3369
mapping = swap_file->f_mapping;
mm/swapfile.c
3371
inode = mapping->host;
mm/swapfile.c
3391
if (mapping_min_folio_order(mapping) > 0) {
mm/swapfile.c
3399
if (!mapping->a_ops->read_folio) {
mm/swapfile.c
3403
folio = read_mapping_folio(mapping, 0, swap_file);
mm/truncate.c
104
spin_lock(&mapping->host->i_lock);
mm/truncate.c
113
if (mapping_shrinkable(mapping))
mm/truncate.c
114
inode_lru_list_add(mapping->host);
mm/truncate.c
115
spin_unlock(&mapping->host->i_lock);
mm/truncate.c
137
const struct address_space_operations *aops = folio->mapping->a_ops;
mm/truncate.c
170
int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
mm/truncate.c
172
if (folio->mapping != mapping)
mm/truncate.c
198
if (ret && !shmem_mapping(folio->mapping)) {
mm/truncate.c
236
truncate_inode_folio(folio->mapping, folio);
mm/truncate.c
245
if (!mapping_inaccessible(folio->mapping))
mm/truncate.c
253
min_order = mapping_min_folio_order(folio->mapping);
mm/truncate.c
26
static void clear_shadow_entries(struct address_space *mapping,
mm/truncate.c
281
folio2->mapping == folio->mapping)
mm/truncate.c
29
XA_STATE(xas, &mapping->i_pages, start);
mm/truncate.c
292
truncate_inode_folio(folio->mapping, folio);
mm/truncate.c
299
int generic_error_remove_folio(struct address_space *mapping,
mm/truncate.c
302
if (!mapping)
mm/truncate.c
308
if (!S_ISREG(mapping->host->i_mode))
mm/truncate.c
310
return truncate_inode_folio(mapping, folio);
mm/truncate.c
325
long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
mm/truncate.c
328
if (!mapping)
mm/truncate.c
33
if (shmem_mapping(mapping) || dax_mapping(mapping))
mm/truncate.c
339
return remove_mapping(mapping, folio);
mm/truncate.c
366
void truncate_inode_pages_range(struct address_space *mapping,
mm/truncate.c
378
if (mapping_empty(mapping))
mm/truncate.c
38
spin_lock(&mapping->host->i_lock);
mm/truncate.c
400
while (index < end && find_lock_entries(mapping, &index, end - 1,
mm/truncate.c
402
truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
mm/truncate.c
405
delete_from_page_cache_batch(mapping, &fbatch);
mm/truncate.c
413
folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
mm/truncate.c
427
folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
mm/truncate.c
440
if (!find_get_entries(mapping, &index, end - 1, &fbatch,
mm/truncate.c
461
truncate_inode_folio(mapping, folio);
mm/truncate.c
464
truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
mm/truncate.c
48
if (mapping_shrinkable(mapping))
mm/truncate.c
483
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
mm/truncate.c
485
truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
mm/truncate.c
49
inode_lru_list_add(mapping->host);
mm/truncate.c
498
void truncate_inode_pages_final(struct address_space *mapping)
mm/truncate.c
50
spin_unlock(&mapping->host->i_lock);
mm/truncate.c
507
mapping_set_exiting(mapping);
mm/truncate.c
509
if (!mapping_empty(mapping)) {
mm/truncate.c
516
xa_lock_irq(&mapping->i_pages);
mm/truncate.c
517
xa_unlock_irq(&mapping->i_pages);
mm/truncate.c
520
truncate_inode_pages(mapping, 0);
mm/truncate.c
534
unsigned long mapping_try_invalidate(struct address_space *mapping,
mm/truncate.c
545
while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
mm/truncate.c
560
ret = mapping_evict_folio(mapping, folio);
mm/truncate.c
576
clear_shadow_entries(mapping, indices[0], indices[nr-1]);
mm/truncate.c
599
unsigned long invalidate_mapping_pages(struct address_space *mapping,
mm/truncate.c
60
static void truncate_folio_batch_exceptionals(struct address_space *mapping,
mm/truncate.c
602
return mapping_try_invalidate(mapping, start, end, NULL);
mm/truncate.c
606
static int folio_launder(struct address_space *mapping, struct folio *folio)
mm/truncate.c
610
if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
mm/truncate.c
612
return mapping->a_ops->launder_folio(folio);
mm/truncate.c
622
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
mm/truncate.c
63
XA_STATE(xas, &mapping->i_pages, indices[0]);
mm/truncate.c
633
ret = folio_launder(mapping, folio);
mm/truncate.c
636
if (folio->mapping != mapping)
mm/truncate.c
641
spin_lock(&mapping->host->i_lock);
mm/truncate.c
642
xa_lock_irq(&mapping->i_pages);
mm/truncate.c
648
xa_unlock_irq(&mapping->i_pages);
mm/truncate.c
649
if (mapping_shrinkable(mapping))
mm/truncate.c
650
inode_lru_list_add(mapping->host);
mm/truncate.c
651
spin_unlock(&mapping->host->i_lock);
mm/truncate.c
653
filemap_free_folio(mapping, folio);
mm/truncate.c
656
xa_unlock_irq(&mapping->i_pages);
mm/truncate.c
657
spin_unlock(&mapping->host->i_lock);
mm/truncate.c
672
int invalidate_inode_pages2_range(struct address_space *mapping,
mm/truncate.c
683
if (mapping_empty(mapping))
mm/truncate.c
688
while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
mm/truncate.c
69
if (shmem_mapping(mapping))
mm/truncate.c
699
if (dax_mapping(mapping) &&
mm/truncate.c
700
!dax_invalidate_mapping_entry_sync(mapping, indices[i]))
mm/truncate.c
710
unmap_mapping_pages(mapping, indices[i],
mm/truncate.c
716
if (unlikely(folio->mapping != mapping)) {
mm/truncate.c
722
ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
mm/truncate.c
729
clear_shadow_entries(mapping, indices[0], indices[nr-1]);
mm/truncate.c
742
if (dax_mapping(mapping)) {
mm/truncate.c
743
unmap_mapping_pages(mapping, start, end - start + 1, false);
mm/truncate.c
758
int invalidate_inode_pages2(struct address_space *mapping)
mm/truncate.c
760
return invalidate_inode_pages2_range(mapping, 0, -1);
mm/truncate.c
781
struct address_space *mapping = inode->i_mapping;
mm/truncate.c
79
if (dax_mapping(mapping)) {
mm/truncate.c
793
unmap_mapping_range(mapping, holebegin, 0, 1);
mm/truncate.c
794
truncate_inode_pages(mapping, newsize);
mm/truncate.c
795
unmap_mapping_range(mapping, holebegin, 0, 1);
mm/truncate.c
902
struct address_space *mapping = inode->i_mapping;
mm/truncate.c
919
unmap_mapping_range(mapping, unmap_start,
mm/truncate.c
921
truncate_inode_pages_range(mapping, lstart, lend);
mm/truncate.c
95
dax_delete_mapping_entry(mapping, indices[i]);
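Annotation: the three consecutive calls at mm/truncate.c:793-795 above are the signature shape of pagecache truncation: unmap, truncate, unmap again. A commented sketch of the sequence, hedged to what the excerpt itself shows:

    unmap_mapping_range(mapping, holebegin, 0, 1);  /* zap ptes up front so
                                                       truncation mostly sees
                                                       unmapped pages */
    truncate_inode_pages(mapping, newsize);         /* drop the page cache */
    unmap_mapping_range(mapping, holebegin, 0, 1);  /* catch ptes faulted in
                                                       during the window */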
mm/userfaultfd.c
509
struct address_space *mapping;
mm/userfaultfd.c
577
mapping = dst_vma->vm_file->f_mapping;
mm/userfaultfd.c
578
hash = hugetlb_fault_mutex_hash(mapping, idx);
mm/util.c
673
unsigned long mapping = (unsigned long)folio->mapping;
mm/util.c
675
if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON)
mm/util.c
677
return (void *)(mapping - FOLIO_MAPPING_ANON);
mm/util.c
694
struct address_space *mapping;
mm/util.c
703
mapping = folio->mapping;
mm/util.c
704
if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS)
mm/util.c
707
return mapping;
mm/vma.c
176
vp->mapping = vma->vm_file->f_mapping;
mm/vma.c
1776
struct address_space *mapping;
mm/vma.c
1779
mapping = vb->vmas[0]->vm_file->f_mapping;
mm/vma.c
1780
i_mmap_lock_write(mapping);
mm/vma.c
1782
VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
mm/vma.c
1783
__remove_shared_vm_struct(vb->vmas[i], mapping);
mm/vma.c
1785
i_mmap_unlock_write(mapping);
mm/vma.c
1813
struct address_space *mapping;
mm/vma.c
1816
mapping = file->f_mapping;
mm/vma.c
1817
i_mmap_lock_write(mapping);
mm/vma.c
1818
__vma_link_file(vma, mapping);
mm/vma.c
1820
i_mmap_unlock_write(mapping);
mm/vma.c
2141
static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
mm/vma.c
2143
if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
mm/vma.c
2153
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
mm/vma.c
2155
down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
mm/vma.c
2275
static void vm_unlock_mapping(struct address_space *mapping)
mm/vma.c
2277
if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
mm/vma.c
228
struct address_space *mapping)
mm/vma.c
2282
i_mmap_unlock_write(mapping);
mm/vma.c
2284
&mapping->flags))
mm/vma.c
231
mapping_allow_writable(mapping);
mm/vma.c
233
flush_dcache_mmap_lock(mapping);
mm/vma.c
234
vma_interval_tree_insert(vma, &mapping->i_mmap);
mm/vma.c
235
flush_dcache_mmap_unlock(mapping);
mm/vma.c
242
struct address_space *mapping)
mm/vma.c
245
mapping_unmap_writable(mapping);
mm/vma.c
247
flush_dcache_mmap_lock(mapping);
mm/vma.c
248
vma_interval_tree_remove(vma, &mapping->i_mmap);
mm/vma.c
249
flush_dcache_mmap_unlock(mapping);
mm/vma.c
297
i_mmap_lock_write(vp->mapping);
mm/vma.c
318
flush_dcache_mmap_lock(vp->mapping);
mm/vma.c
319
vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
mm/vma.c
322
&vp->mapping->i_mmap);
mm/vma.c
341
&vp->mapping->i_mmap);
mm/vma.c
342
vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
mm/vma.c
343
flush_dcache_mmap_unlock(vp->mapping);
mm/vma.c
347
__remove_shared_vm_struct(vp->remove, vp->mapping);
mm/vma.c
349
__remove_shared_vm_struct(vp->remove2, vp->mapping);
mm/vma.c
368
i_mmap_unlock_write(vp->mapping);
mm/vma.h
17
struct address_space *mapping;
mm/vmscan.c
1103
struct address_space *mapping;
mm/vmscan.c
1213
mapping = folio_mapping(folio);
mm/vmscan.c
1226
(mapping &&
mm/vmscan.c
1227
mapping_writeback_may_deadlock_on_reclaim(mapping))) {
mm/vmscan.c
1395
mapping = folio_mapping(folio);
mm/vmscan.c
1425
switch (pageout(folio, mapping, &plug, folio_list)) {
mm/vmscan.c
1460
mapping = folio_mapping(folio);
mm/vmscan.c
1493
if (!mapping && folio_ref_count(folio) == 1) {
mm/vmscan.c
1525
} else if (!mapping || !__remove_mapping(mapping, folio, true,
mm/vmscan.c
3327
struct address_space *mapping;
mm/vmscan.c
3352
mapping = vma->vm_file->f_mapping;
mm/vmscan.c
3353
if (mapping_unevictable(mapping))
mm/vmscan.c
3356
if (shmem_mapping(mapping))
mm/vmscan.c
3363
return !mapping->a_ops->read_folio;
mm/vmscan.c
493
static void handle_write_error(struct address_space *mapping,
mm/vmscan.c
497
if (folio_mapping(folio) == mapping)
mm/vmscan.c
498
mapping_set_error(mapping, error);
mm/vmscan.c
637
static pageout_t writeout(struct folio *folio, struct address_space *mapping,
mm/vmscan.c
649
if (shmem_mapping(mapping))
mm/vmscan.c
655
handle_write_error(mapping, folio, res);
mm/vmscan.c
673
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
mm/vmscan.c
693
if (folio_ref_count(folio) != 1 + folio_nr_pages(folio) || !mapping)
mm/vmscan.c
695
if (!shmem_mapping(mapping) && !folio_test_anon(folio))
mm/vmscan.c
699
return writeout(folio, mapping, plug, folio_list);
mm/vmscan.c
706
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
mm/vmscan.c
714
BUG_ON(mapping != folio_mapping(folio));
mm/vmscan.c
719
spin_lock(&mapping->host->i_lock);
mm/vmscan.c
720
xa_lock_irq(&mapping->i_pages);
mm/vmscan.c
760
if (reclaimed && !mapping_exiting(mapping))
mm/vmscan.c
768
free_folio = mapping->a_ops->free_folio;
mm/vmscan.c
786
!mapping_exiting(mapping) && !dax_mapping(mapping))
mm/vmscan.c
789
xa_unlock_irq(&mapping->i_pages);
mm/vmscan.c
790
if (mapping_shrinkable(mapping))
mm/vmscan.c
791
inode_lru_list_add(mapping->host);
mm/vmscan.c
792
spin_unlock(&mapping->host->i_lock);
mm/vmscan.c
804
xa_unlock_irq(&mapping->i_pages);
mm/vmscan.c
805
spin_unlock(&mapping->host->i_lock);
mm/vmscan.c
822
long remove_mapping(struct address_space *mapping, struct folio *folio)
mm/vmscan.c
824
if (__remove_mapping(mapping, folio, false, NULL)) {
mm/vmscan.c
957
struct address_space *mapping;
mm/vmscan.c
981
mapping = folio_mapping(folio);
mm/vmscan.c
982
if (mapping && mapping->a_ops->is_dirty_writeback)
mm/vmscan.c
983
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
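Annotation: __remove_mapping() (mm/vmscan.c:706 onward) uses the same lock nesting as the mm/truncate.c and mm/workingset.c excerpts: the host inode's i_lock is taken outside the page-cache xarray lock. Distilled from the lines above:

    spin_lock(&mapping->host->i_lock);
    xa_lock_irq(&mapping->i_pages);
    /* ... detach the folio from the page cache ... */
    xa_unlock_irq(&mapping->i_pages);
    if (mapping_shrinkable(mapping))
            inode_lru_list_add(mapping->host); /* emptied inode becomes
                                                  reclaimable */
    spin_unlock(&mapping->host->i_lock);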
mm/workingset.c
703
struct address_space *mapping;
mm/workingset.c
718
mapping = container_of(node->array, struct address_space, i_pages);
mm/workingset.c
721
if (!xa_trylock(&mapping->i_pages)) {
mm/workingset.c
728
if (mapping->host != NULL) {
mm/workingset.c
729
if (!spin_trylock(&mapping->host->i_lock)) {
mm/workingset.c
730
xa_unlock(&mapping->i_pages);
mm/workingset.c
755
xa_unlock_irq(&mapping->i_pages);
mm/workingset.c
756
if (mapping->host != NULL) {
mm/workingset.c
757
if (mapping_shrinkable(mapping))
mm/workingset.c
758
inode_lru_list_add(mapping->host);
mm/workingset.c
759
spin_unlock(&mapping->host->i_lock);
mm/zpdesc.h
56
ZPDESC_MATCH(mapping, movable_ops);
net/bluetooth/cmtp/capi.c
115
if (app->mapping == value)
net/bluetooth/cmtp/capi.c
209
application->mapping = CAPIMSG_APPID(skb->data);
net/bluetooth/cmtp/capi.c
447
cmtp_send_interopmsg(session, CAPI_REQ, application->mapping, application->msgnum,
net/bluetooth/cmtp/capi.c
474
CAPIMSG_SETAPPID(skb->data, application->mapping);
net/bluetooth/cmtp/capi.c
502
seq_printf(m, "appl %u -> %u\n", app->appl, app->mapping);
net/bluetooth/cmtp/cmtp.h
110
__u16 mapping;
net/ipv4/tcp.c
1942
if (PageCompound(page) || page->mapping)
net/rds/ib_rdma.c
254
WARN_ON(!page->mapping && irqs_disabled());
net/sched/sch_generic.c
211
int mapping = skb_get_queue_mapping(skb);
net/sched/sch_generic.c
219
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
net/sunrpc/cache.c
934
static ssize_t cache_downcall(struct address_space *mapping,
net/sunrpc/cache.c
960
struct address_space *mapping = filp->f_mapping;
net/sunrpc/cache.c
968
ret = cache_downcall(mapping, buf, count, cd);
security/selinux/ss/services.c
111
out_map->mapping = kzalloc_objs(*out_map->mapping, ++i, GFP_ATOMIC);
security/selinux/ss/services.c
112
if (!out_map->mapping)
security/selinux/ss/services.c
119
struct selinux_mapping *p_out = out_map->mapping + j;
security/selinux/ss/services.c
168
kfree(out_map->mapping);
security/selinux/ss/services.c
169
out_map->mapping = NULL;
security/selinux/ss/services.c
180
return map->mapping[tclass].value;
security/selinux/ss/services.c
193
if (map->mapping[i].value == pol_value)
security/selinux/ss/services.c
205
struct selinux_mapping *mapping = &map->mapping[tclass];
security/selinux/ss/services.c
206
unsigned int i, n = mapping->num_perms;
security/selinux/ss/services.c
210
if (avd->allowed & mapping->perms[i])
security/selinux/ss/services.c
212
if (allow_unknown && !mapping->perms[i])
security/selinux/ss/services.c
218
if (avd->auditallow & mapping->perms[i])
security/selinux/ss/services.c
2203
kfree(policy->map.mapping);
security/selinux/ss/services.c
223
if (avd->auditdeny & mapping->perms[i])
security/selinux/ss/services.c
225
if (!allow_unknown && !mapping->perms[i])
security/selinux/ss/services.c
2392
kfree(newpolicy->map.mapping);
security/selinux/ss/services.h
22
struct selinux_mapping *mapping; /* indexed by class */
sound/soc/sof/intel/hda-dai.c
592
dma_config->dma_stream_channel_map.mapping[0].device = data.dai_index;
sound/soc/sof/intel/hda-dai.c
593
dma_config->dma_stream_channel_map.mapping[0].channel_mask = ch_mask;
sound/soc/sof/ipc4-topology.c
2353
blob->alh_cfg.mapping[i].device = SOF_IPC4_NODE_TYPE(node_type);
sound/soc/sof/ipc4-topology.c
2354
blob->alh_cfg.mapping[i].device |=
sound/soc/sof/ipc4-topology.c
2364
blob->alh_cfg.mapping[i].device =
sound/soc/sof/ipc4-topology.c
2365
dma_config->dma_stream_channel_map.mapping[0].device;
sound/soc/sof/ipc4-topology.c
2380
blob->alh_cfg.mapping[i].channel_mask = mask << (step * i);
sound/soc/sof/ipc4-topology.c
851
sizeof(*blob->alh_cfg.mapping) *
sound/soc/sof/ipc4-topology.h
267
struct sof_ipc4_dma_device_stream_ch_map mapping[SOF_IPC4_DMA_DEVICE_MAX_COUNT];
tools/testing/nvdimm/test/ndtest.c
147
.mapping = region0_mapping,
tools/testing/nvdimm/test/ndtest.c
154
.mapping = region1_mapping,
tools/testing/nvdimm/test/ndtest.c
173
.mapping = region6_mapping,
tools/testing/nvdimm/test/ndtest.c
418
int i, ndimm = region->mapping[0].dimm;
tools/testing/nvdimm/test/ndtest.c
430
ndr_desc->mapping = mappings;
tools/testing/nvdimm/test/ndtest.c
450
ndimm = region->mapping[i].dimm;
tools/testing/nvdimm/test/ndtest.c
451
mappings[i].start = region->mapping[i].start;
tools/testing/nvdimm/test/ndtest.c
452
mappings[i].size = region->mapping[i].size;
tools/testing/nvdimm/test/ndtest.c
453
mappings[i].position = region->mapping[i].position;
tools/testing/nvdimm/test/ndtest.h
63
struct ndtest_mapping *mapping;
tools/testing/nvdimm/test/nfit.c
689
nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
tools/testing/selftests/arm64/mte/check_buffer_fill.c
365
static int check_memory_initial_tags(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_buffer_fill.c
374
ptr = (char *)mte_allocate_memory(sizes[run], mem_type, mapping, false);
tools/testing/selftests/arm64/mte/check_buffer_fill.c
387
ptr = (char *)mte_allocate_file_memory(sizes[run], mem_type, mapping, false, fd);
tools/testing/selftests/arm64/mte/check_child_memory.c
106
static int check_child_file_mapping(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_child_memory.c
119
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
tools/testing/selftests/arm64/mte/check_child_memory.c
84
static int check_child_memory_mapping(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_child_memory.c
93
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
146
static int check_hugetlb_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
155
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
176
static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
185
map_ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
205
static int check_child_hugetlb_memory_mapping(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_hugetlb_options.c
214
ptr = (char *)mte_allocate_memory_tag_range(map_size, mem_type, mapping,
tools/testing/selftests/arm64/mte/check_ksm_options.c
110
ptr = mte_allocate_memory(TEST_UNIT * page_sz, mem_type, mapping, true);
tools/testing/selftests/arm64/mte/check_ksm_options.c
98
static int check_madvise_options(int mem_type, int mode, int mapping)
tools/testing/selftests/arm64/mte/check_mmap_options.c
113
static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
126
map_ptr = (char *)mte_allocate_memory(map_size, mem_type, mapping, false);
tools/testing/selftests/arm64/mte/check_mmap_options.c
148
static int check_file_memory_mapping(int mem_type, int mode, int mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
166
map_ptr = (char *)mte_allocate_file_memory(map_size, mem_type, mapping, false, fd);
tools/testing/selftests/arm64/mte/check_mmap_options.c
191
static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)
tools/testing/selftests/arm64/mte/check_mmap_options.c
201
ptr = (char *)mte_allocate_memory_tag_range(sizes[run], mem_type, mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
222
ptr = (char *)mte_allocate_file_memory_tag_range(sizes[run], mem_type, mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
300
switch (tc->mapping) {
tools/testing/selftests/arm64/mte/check_mmap_options.c
370
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
380
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
390
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
400
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
410
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
420
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
430
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
440
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
450
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
460
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
47
int mapping;
tools/testing/selftests/arm64/mte/check_mmap_options.c
470
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
480
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
490
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
500
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
510
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
520
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
530
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
540
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
550
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
560
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
570
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
580
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
590
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
600
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
610
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
620
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
630
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
640
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
650
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
660
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
670
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
680
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
690
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
700
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
710
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
720
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
730
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
740
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
750
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
760
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
770
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
780
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
790
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
800
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
810
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
820
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
830
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
840
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
850
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
860
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
870
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
880
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
890
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
900
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
910
.mapping = MAP_SHARED,
tools/testing/selftests/arm64/mte/check_mmap_options.c
920
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
930
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
940
.mapping = MAP_PRIVATE,
tools/testing/selftests/arm64/mte/check_mmap_options.c
979
test_cases[i].mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
988
test_cases[i].mapping,
tools/testing/selftests/arm64/mte/check_mmap_options.c
997
test_cases[i].mapping,
tools/testing/selftests/arm64/mte/check_user_mem.c
34
static int check_usermem_access_fault(int mem_type, int mode, int mapping,
tools/testing/selftests/arm64/mte/check_user_mem.c
55
ptr = mte_allocate_memory(len, mem_type, mapping, true);
tools/testing/selftests/arm64/mte/mte_common_util.c
167
static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
tools/testing/selftests/arm64/mte/mte_common_util.c
190
map_flag = mapping;
tools/testing/selftests/arm64/mte/mte_common_util.c
193
if (!(mapping & MAP_SHARED))
tools/testing/selftests/arm64/mte/mte_common_util.c
212
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
tools/testing/selftests/arm64/mte/mte_common_util.c
215
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
tools/testing/selftests/arm64/mte/mte_common_util.c
219
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags)
tools/testing/selftests/arm64/mte/mte_common_util.c
221
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1);
tools/testing/selftests/arm64/mte/mte_common_util.c
224
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd)
tools/testing/selftests/arm64/mte/mte_common_util.c
246
return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd);
tools/testing/selftests/arm64/mte/mte_common_util.c
249
void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
tools/testing/selftests/arm64/mte/mte_common_util.c
272
return __mte_allocate_memory_range(size, mem_type, mapping, range_before,
tools/testing/selftests/arm64/mte/mte_common_util.h
48
void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
tools/testing/selftests/arm64/mte/mte_common_util.h
49
void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
tools/testing/selftests/arm64/mte/mte_common_util.h
51
void *mte_allocate_file_memory(size_t size, int mem_type, int mapping,
tools/testing/selftests/arm64/mte/mte_common_util.h
53
void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping,
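Annotation: in the arm64 MTE selftests above, `mapping` is simply the mmap() flags word (MAP_PRIVATE or MAP_SHARED), with MAP_ANONYMOUS mixed in when no file descriptor backs the allocation (mte_common_util.c:190-193). A runnable sketch of that role; allocate() is a hypothetical stand-in for the mte_allocate_*() helpers, and the arm64-only PROT_MTE layering is deliberately omitted:

    #include <string.h>
    #include <sys/mman.h>

    static void *allocate(size_t size, int mapping, int fd)
    {
            /* Anonymous unless a file descriptor was supplied. */
            int flags = mapping | (fd < 0 ? MAP_ANONYMOUS : 0);

            return mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd, 0);
    }

    int main(void)
    {
            char *p = allocate(4096, MAP_PRIVATE, -1);

            if (p == MAP_FAILED)
                    return 1;
            memset(p, '*', 4096);
            return munmap(p, 4096);
    }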
tools/testing/selftests/mm/ksm_tests.c
173
static void *allocate_memory(void *ptr, int prot, int mapping, char data, size_t map_size)
tools/testing/selftests/mm/ksm_tests.c
175
void *map_ptr = mmap(ptr, map_size, PROT_WRITE, mapping, -1, 0);
tools/testing/selftests/mm/ksm_tests.c
320
static int check_ksm_merge(int merge_type, int mapping, int prot,
tools/testing/selftests/mm/ksm_tests.c
332
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
tools/testing/selftests/mm/ksm_tests.c
354
static int check_ksm_unmerge(int merge_type, int mapping, int prot, int timeout, size_t page_size)
tools/testing/selftests/mm/ksm_tests.c
366
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
tools/testing/selftests/mm/ksm_tests.c
394
static int check_ksm_zero_page_merge(int merge_type, int mapping, int prot, long page_count,
tools/testing/selftests/mm/ksm_tests.c
409
map_ptr = allocate_memory(NULL, prot, mapping, 0, page_size * page_count);
tools/testing/selftests/mm/ksm_tests.c
459
static int check_ksm_numa_merge(int merge_type, int mapping, int prot, int timeout,
tools/testing/selftests/mm/ksm_tests.c
523
static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
tools/testing/selftests/mm/ksm_tests.c
596
static int ksm_merge_time(int merge_type, int mapping, int prot, int timeout, size_t map_size)
tools/testing/selftests/mm/ksm_tests.c
604
map_ptr = allocate_memory(NULL, prot, mapping, '*', map_size);
tools/testing/selftests/mm/ksm_tests.c
637
static int ksm_unmerge_time(int merge_type, int mapping, int prot, int timeout, size_t map_size)
tools/testing/selftests/mm/ksm_tests.c
645
map_ptr = allocate_memory(NULL, prot, mapping, '*', map_size);
tools/testing/selftests/mm/ksm_tests.c
684
static int ksm_cow_time(int merge_type, int mapping, int prot, int timeout, size_t page_size)
tools/testing/selftests/mm/ksm_tests.c
693
map_ptr = allocate_memory(NULL, prot, mapping, '*', page_size * page_count);
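Annotation: ksm_tests.c likewise passes `mapping` straight through to mmap() as the flags argument (line 175 above). A runnable sketch of the allocate-fill-merge pattern those tests build on; whether pages actually merge depends on the KSM daemon being enabled, which this sketch does not check:

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 4096;
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            memset(p, '*', len);             /* make both pages identical */
            madvise(p, len, MADV_MERGEABLE); /* opt in to KSM scanning */
            return munmap(p, len);
    }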
tools/testing/selftests/namespaces/file_handle_test.c
1015
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
1016
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
1017
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
1020
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
1021
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
1144
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
1145
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
1146
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
1149
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
1150
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
1293
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
1294
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
1295
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
1298
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
1299
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
496
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
497
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
498
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
501
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
502
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
625
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
626
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
627
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
630
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
631
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
754
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
755
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
756
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
759
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
760
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
883
char mapping[64];
tools/testing/selftests/namespaces/file_handle_test.c
884
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/file_handle_test.c
885
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/file_handle_test.c
888
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/file_handle_test.c
889
write(gid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/ns_active_ref_test.c
326
char mapping[64];
tools/testing/selftests/namespaces/ns_active_ref_test.c
327
snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
tools/testing/selftests/namespaces/ns_active_ref_test.c
328
write(uid_map_fd, mapping, strlen(mapping));
tools/testing/selftests/namespaces/ns_active_ref_test.c
331
snprintf(mapping, sizeof(mapping), "0 %d 1", getgid());
tools/testing/selftests/namespaces/ns_active_ref_test.c
332
write(gid_map_fd, mapping, strlen(mapping));
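Annotation: every namespace selftest above builds the same idmap string, "0 <uid> 1": map id 0 inside the new user namespace onto the caller's id, for a range of one. A runnable sketch of that sequence; note that, unlike uid_map, writing gid_map first requires writing "deny" to /proc/self/setgroups, which this sketch sidesteps by mapping only the uid:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char mapping[64];
            int fd;

            if (unshare(CLONE_NEWUSER))
                    return 1;

            fd = open("/proc/self/uid_map", O_WRONLY);
            if (fd < 0)
                    return 1;
            snprintf(mapping, sizeof(mapping), "0 %d 1", getuid());
            if (write(fd, mapping, strlen(mapping)) < 0)
                    return 1;
            close(fd);
            return 0;       /* the caller is now uid 0 in the new ns */
    }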
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
139
struct iommu_mapping mapping;
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
160
rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
169
printf("PGD: 0x%016lx\n", mapping.pgd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
170
printf("P4D: 0x%016lx\n", mapping.p4d);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
171
printf("PUD: 0x%016lx\n", mapping.pud);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
172
printf("PMD: 0x%016lx\n", mapping.pmd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
173
printf("PTE: 0x%016lx\n", mapping.pte);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
177
ASSERT_NE(0, mapping.pte);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
180
ASSERT_EQ(0, mapping.pte);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
181
ASSERT_NE(0, mapping.pmd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
184
ASSERT_EQ(0, mapping.pte);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
185
ASSERT_EQ(0, mapping.pmd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
186
ASSERT_NE(0, mapping.pud);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
198
ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
39
struct iommu_mapping *mapping)
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
67
memset(mapping, 0, sizeof(*mapping));
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
68
parse_next_value(&rest, &mapping->pgd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
69
parse_next_value(&rest, &mapping->p4d);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
70
parse_next_value(&rest, &mapping->pud);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
71
parse_next_value(&rest, &mapping->pmd);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
72
parse_next_value(&rest, &mapping->pte);
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
87
struct iommu_mapping *mapping)
tools/testing/selftests/vfio/vfio_dma_mapping_test.c
90
return intel_iommu_mapping_get(bdf, iova, mapping);
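Annotation: the vfio test excerpts zero a struct iommu_mapping and call parse_next_value() once per page-table level. The helper's body is not shown in this index, so the stand-in below is an assumption (space-separated hex values) rather than the test's actual input format; only the struct layout and call sequence mirror the excerpts:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct iommu_mapping {
            unsigned long pgd, p4d, pud, pmd, pte;
    };

    /* Hypothetical stand-in: consume one hex number, advance the cursor. */
    static void parse_next_value(char **rest, unsigned long *val)
    {
            *val = strtoul(*rest, rest, 16);
    }

    int main(void)
    {
            char line[] = "1a2b000 1a2c000 1a2d000 1a2e000 1a2f067";
            char *rest = line;
            struct iommu_mapping mapping;

            memset(&mapping, 0, sizeof(mapping));
            parse_next_value(&rest, &mapping.pgd);
            parse_next_value(&rest, &mapping.p4d);
            parse_next_value(&rest, &mapping.pud);
            parse_next_value(&rest, &mapping.pmd);
            parse_next_value(&rest, &mapping.pte);
            printf("PTE: 0x%016lx\n", mapping.pte);
            return 0;
    }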
tools/testing/selftests/x86/test_shadow_stack.c
544
void *mapping;
tools/testing/selftests/x86/test_shadow_stack.c
579
cur->mapping = test_map;
tools/testing/selftests/x86/test_shadow_stack.c
588
munmap(cur->mapping, PAGE_SIZE);
tools/testing/selftests/x86/test_shadow_stack.c
628
cur->mapping = test_map;
tools/testing/selftests/x86/test_shadow_stack.c
643
munmap(cur->mapping, PAGE_SIZE);
tools/testing/vma/include/dup.h
1043
static inline void mapping_allow_writable(struct address_space *mapping)
tools/testing/vma/include/dup.h
1045
atomic_inc(&mapping->i_mmap_writable);
tools/testing/vma/include/dup.h
1280
static inline int mapping_map_writable(struct address_space *mapping)
tools/testing/vma/include/dup.h
1282
return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
tools/testing/vma/include/stubs.h
138
static inline void i_mmap_unlock_write(struct address_space *mapping)
tools/testing/vma/include/stubs.h
190
static inline bool mapping_can_writeback(struct address_space *mapping)
tools/testing/vma/include/stubs.h
268
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
tools/testing/vma/include/stubs.h
291
static inline void i_mmap_lock_write(struct address_space *mapping)
tools/testing/vma/include/stubs.h
399
static inline void mapping_unmap_writable(struct address_space *mapping)
tools/testing/vma/include/stubs.h
403
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
virt/kvm/guest_memfd.c
254
struct address_space *mapping = inode->i_mapping;
virt/kvm/guest_memfd.c
262
filemap_invalidate_lock_shared(mapping);
virt/kvm/guest_memfd.c
294
filemap_invalidate_unlock_shared(mapping);
virt/kvm/guest_memfd.c
42
#define kvm_gmem_for_each_file(f, mapping) \
virt/kvm/guest_memfd.c
43
list_for_each_entry(f, &(mapping)->i_private_list, entry)
virt/kvm/guest_memfd.c
490
static int kvm_gmem_migrate_folio(struct address_space *mapping,
virt/kvm/guest_memfd.c
498
static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
virt/kvm/guest_memfd.c
502
filemap_invalidate_lock_shared(mapping);
virt/kvm/guest_memfd.c
507
kvm_gmem_invalidate_begin(mapping->host, start, end);
virt/kvm/guest_memfd.c
518
kvm_gmem_invalidate_end(mapping->host, start, end);
virt/kvm/guest_memfd.c
520
filemap_invalidate_unlock_shared(mapping);